/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ?
		MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, top-down allocation will be tried.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * Discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
361 * 362 * WARNING: We assume that either slab_is_available() and we use it or 363 * we use MEMBLOCK for allocations. That means that this is unsafe to 364 * use when bootmem is currently active (unless bootmem itself is 365 * implemented on top of MEMBLOCK which isn't the case yet) 366 * 367 * This should however not be an issue for now, as we currently only 368 * call into MEMBLOCK while it's still active, or much later when slab 369 * is active for memory hotplug operations 370 */ 371 if (use_slab) { 372 new_array = kmalloc(new_size, GFP_KERNEL); 373 addr = new_array ? __pa(new_array) : 0; 374 } else { 375 /* only exclude range when trying to double reserved.regions */ 376 if (type != &memblock.reserved) 377 new_area_start = new_area_size = 0; 378 379 addr = memblock_find_in_range(new_area_start + new_area_size, 380 memblock.current_limit, 381 new_alloc_size, PAGE_SIZE); 382 if (!addr && new_area_size) 383 addr = memblock_find_in_range(0, 384 min(new_area_start, memblock.current_limit), 385 new_alloc_size, PAGE_SIZE); 386 387 new_array = addr ? __va(addr) : NULL; 388 } 389 if (!addr) { 390 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 391 type->name, type->max, type->max * 2); 392 return -1; 393 } 394 395 memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]", 396 type->name, type->max * 2, (u64)addr, 397 (u64)addr + new_size - 1); 398 399 /* 400 * Found space, we now need to move the array over before we add the 401 * reserved region since it may be our reserved array itself that is 402 * full. 403 */ 404 memcpy(new_array, type->regions, old_size); 405 memset(new_array + type->max, 0, old_size); 406 old_array = type->regions; 407 type->regions = new_array; 408 type->max <<= 1; 409 410 /* Free old array. We needn't free it if the array is the static one */ 411 if (*in_slab) 412 kfree(old_array); 413 else if (old_array != memblock_memory_init_regions && 414 old_array != memblock_reserved_init_regions) 415 memblock_free(__pa(old_array), old_alloc_size); 416 417 /* 418 * Reserve the new array if that comes from the memblock. Otherwise, we 419 * needn't do it 420 */ 421 if (!use_slab) 422 BUG_ON(memblock_reserve(addr, new_alloc_size)); 423 424 /* Update slab flag */ 425 *in_slab = use_slab; 426 427 return 0; 428 } 429 430 /** 431 * memblock_merge_regions - merge neighboring compatible regions 432 * @type: memblock type to scan 433 * 434 * Scan @type and merge neighboring compatible regions. 
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.
		 * If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg(" memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
						phys_addr_t *out_start,
						phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to
 * memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	  is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	  allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
#ifdef CONFIG_DEBUG_VM
	if (ptr && size > 0)
		memset(ptr, PAGE_POISON_PATTERN, size);
#endif
	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	  is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	  allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);

	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	  is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	  allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr) {
		memset(ptr, 0, size);
		return ptr;
	}

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * True if the region is a subset of a memory block, false if not.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444,
			    root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
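
/*
 * Illustrative sketch (not part of the upstream file): how architecture
 * early-boot code typically drives the API implemented above. The function
 * name my_early_mm_init() and the dram_base/dram_size parameters are
 * hypothetical placeholders; memblock_add(), memblock_reserve(),
 * memblock_allow_resize() and memblock_alloc() are the real interfaces
 * defined in this file.
 *
 *	static void __init my_early_mm_init(phys_addr_t dram_base,
 *					    phys_addr_t dram_size)
 *	{
 *		phys_addr_t pt;
 *
 *		memblock_add(dram_base, dram_size);
 *		memblock_reserve(__pa_symbol(_text), _end - _text);
 *		memblock_allow_resize();
 *
 *		pt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *		if (!pt)
 *			panic("cannot allocate early page table page");
 *	}
 */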