/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
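/*
 * For illustration: regions are half-open intervals [base, base + size),
 * so adjacent regions do not count as overlapping:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000) == 0
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000) != 0
 */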
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
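/*
 * A worked example for the two finders above, with hypothetical numbers:
 * given a single free range [0x1000, 0x9000), size = 0x2000 and
 * align = 0x1000, the bottom-up finder returns
 * round_up(0x1000, 0x1000) = 0x1000, while the top-down finder returns
 * round_down(0x9000 - 0x2000, 0x1000) = 0x7000.
 */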
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When the allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image; otherwise it is raised to that
 * boundary. The reason is that we want bottom-up allocations to land just
 * above the kernel image, so it is highly likely that the allocated memory
 * and the kernel will reside in the same node.
 *
 * If the bottom-up allocation fails, the search falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}
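/*
 * A minimal usage sketch (hypothetical size, alignment and node id).
 * Note that the finder only locates space; the caller still has to
 * reserve it:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(SZ_1M, PAGE_SIZE, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE, 1);
 *	if (addr)
 *		memblock_reserve(addr, SZ_1M);
 */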
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					   NUMA_NO_NODE);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use slab, or we use MEMBLOCK for allocations. That means that this
	 * is unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
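/*
 * A worked example for the doubling above, assuming a 64-bit build where
 * sizeof(struct memblock_region) is 32 bytes and type->max is 128:
 * old_size = 128 * 32 = 4096 bytes and new_size = 8192 bytes, so with 4K
 * pages old_alloc_size/new_alloc_size are one and two pages respectively,
 * and the replaced array can later be freed page by page.
 */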
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
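/*
 * To illustrate the merge rule above: [0x0, 0x1000) and [0x1000, 0x2000)
 * with identical nid and flags become the single region [0x0, 0x2000),
 * while a hole, a differing nid or differing flags keeps them separate.
 */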
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_range(&memblock.memory, base, size,
				  MAX_NUMNODES, 0);
}
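/*
 * A usage sketch for the add path, with hypothetical addresses. Adding a
 * range that overlaps an existing one only contributes the uncovered
 * part, and compatible neighbours are merged afterwards:
 *
 *	memblock_add(0x0, 0x4000);	memory is [0x0, 0x4000)
 *	memblock_add(0x2000, 0x4000);	memory is [0x0, 0x6000)
 */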
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
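/*
 * An example for the isolation above, with a hypothetical map: given the
 * single region [0x0, 0x4000), isolating base = 0x1000, size = 0x1000
 * leaves the three regions [0x0, 0x1000), [0x1000, 0x2000) and
 * [0x2000, 0x4000), and returns *start_rgn = 1, *end_rgn = 2.
 */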
int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and marks it with
 * flag MEMBLOCK_HOTPLUG.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and clears flag
 * MEMBLOCK_HOTPLUG for the isolated regions.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_clear_region_flags(&type->regions[i],
					    MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
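/*
 * __next_mem_range() backs the for_each_free_mem_range() iterator; a
 * minimal usage sketch (caller-provided loop variables):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */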
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		/* @type_b may be NULL, in which case there are no gaps */
		idx_b = type_b ? type_b->cnt : 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;	/* walking backwards, so decrement */
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
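/*
 * Both walkers pack their two cursors into the caller's single u64 so a
 * loop can resume where it left off. For example, hypothetical cursors
 * idx_a == 3 and idx_b == 1 are stored as:
 *
 *	*idx = (u32)3 | (u64)1 << 32;	i.e. 0x0000000100000003
 */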
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * falls back to memory below @min_addr. Also, the allocation may fall back
 * to any node in the system if the specified node cannot hold the requested
 * memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public wrapper of memblock_virt_alloc_internal() which provides additional
 * debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}
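/*
 * A usage sketch for the nopanic variant above (the struct and the node
 * id are hypothetical). Unlike memblock_virt_alloc_try_nid() below, the
 * caller must handle failure:
 *
 *	struct foo *table;
 *
 *	table = memblock_virt_alloc_try_nid_nopanic(sizeof(*table),
 *			SMP_CACHE_BYTES, 0, BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *	if (!table)
 *		return -ENOMEM;
 */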
/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking wrapper of memblock_virt_alloc_internal() which provides
 * debug information (including caller info), if enabled, and panics if
 * the request cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}
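/*
 * A sketch of the intended pairing (assuming the memblock_virt_alloc()
 * wrapper declared in the bootmem header): a block obtained early that is
 * no longer needed before the buddy allocator is up can be returned with:
 *
 *	ptr = memblock_virt_alloc(size, 0);
 *	...
 *	__memblock_free_early(__pa(ptr), size);
 */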
/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif
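/*
 * For the subset check below, a hypothetical example: with memory =
 * [0x1000, 0x3000), the region at base 0x1800 with size 0x800 is a
 * subset (fully inside), while base 0x2800 with size 0x1000 is not,
 * since it runs past the end of the block.
 */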
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
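/*
 * The early parameter above means that booting with "memblock=debug" on
 * the kernel command line sets memblock_debug and thereby enables the
 * memblock_dbg() tracing used throughout this file.
 */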
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */