/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
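
/*
 * Illustrative example (editor's addition, not part of the original file):
 * the overlap test above treats regions as half-open intervals, so regions
 * that merely touch do not overlap:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000); // 0: adjacent
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000); // non-zero
 */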

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}
	return 0;
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(start, end, size, align,
					   MAX_NUMNODES);
}
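
/*
 * Illustrative example (editor's addition): a caller that wants a
 * page-aligned megabyte somewhere below the 4GiB mark (values made up)
 * might do:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, 0x100000000ULL, SZ_1M, PAGE_SIZE);
 *
 * The search is top-down, so the highest suitable candidate in the range
 * is returned, and nothing is reserved yet - pair this with
 * memblock_reserve() to actually claim the area.
 */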

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it,
	 * or we use MEMBLOCK for allocations.  That means this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should not be an issue for now, as we currently only call
	 * into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_reserve(addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}
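
/*
 * Illustrative example (editor's addition): two entries [0x0-0x1000) and
 * [0x1000-0x2000) with the same node id collapse into a single
 * [0x0-0x2000) entry above, while a gap or a differing node id keeps them
 * separate.  This is what keeps the region arrays minimal after every
 * addition.
 */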

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		type->regions[i].nid = nid;

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}
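
/*
 * Editor's note: booting with "memblock=debug" on the kernel command line
 * sets memblock_debug below, which turns the memblock_dbg() calls in this
 * file (reserve, free, array doubling) into pr_info() output.
 */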

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
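
/*
 * Illustrative example (editor's addition): with CONFIG_DEBUG_FS enabled,
 * the files created above dump the raw region arrays, one range per line
 * (addresses made up):
 *
 *	$ cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000000000..0x000000007fffffff
 *	   1: 0x0000000100000000..0x000000017fffffff
 */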