/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

/* Round @addr down to a multiple of @size; @size must be a power of two. */
static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

/* Round @addr up to a multiple of @size; @size must be a power of two. */
static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

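/*
 * Example (illustrative only): with a 4KB (0x1000) power-of-two alignment,
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 */
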
/* Return 1 if the half-open ranges [base1, base1+size1) and
 * [base2, base2+size2) intersect, 0 otherwise.
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

/* Return the index of the first region in @type that overlaps
 * [base, base+size), or -1 if there is no overlap.
 */
static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
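
/*
 * Worked example (illustrative only): [0x1000, 0x2000) and [0x1800, 0x2800)
 * overlap because 0x1000 < 0x2800 and 0x1800 < 0x2000. [0x1000, 0x2000)
 * and [0x2000, 0x3000) do not overlap, since the ranges are half-open.
 */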

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = memblock_align_down((end - size), align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocations near the
	 * top of memory.
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}
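
/*
 * Illustrative usage (hypothetical early-boot caller, not from this file):
 *
 *	u64 addr = memblock_find_in_range(0, 1ULL << 32, SZ_1M, SZ_1M);
 *	if (addr == MEMBLOCK_ERROR)
 *		panic("no free 1MB window below 4GB");
 *
 * Note that this only locates a candidate range; the caller must still
 * memblock_reserve() it before using it.
 */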

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;

	/* Special case for empty arrays: keep one zero-sized filler
	 * region so cnt never drops to zero.
	 */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we fall back to MEMBLOCK for the allocation.
	 * That means this is unsafe to use when bootmem is currently active
	 * (unless bootmem itself is implemented on top of MEMBLOCK, which
	 * isn't the case yet).
	 *
	 * This should not be an issue for now, however, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

/* Default (weak) implementation: any two adjacent or overlapping regions
 * may be coalesced. Architectures can override this.
 */
int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there are no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble.
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive; if you do that, be prepared
				 * for trouble.
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case: replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array,
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
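
/*
 * Worked example of the coalescing above (illustrative only): starting
 * from an empty table,
 *
 *	memblock_add(0x1000, 0x1000);	table: [0x1000-0x1fff]
 *	memblock_add(0x3000, 0x1000);	table: [0x1000-0x1fff] [0x3000-0x3fff]
 *	memblock_add(0x2000, 0x1000);	table: [0x1000-0x3fff]
 *
 * The third range is adjacent to both existing regions, so all three
 * coalesce into a single entry.
 */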

static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
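
/*
 * Illustrative usage (hypothetical early-boot caller, not from this file):
 *
 *	phys_addr_t table = memblock_alloc(SZ_64K, SZ_4K);
 *	void *virt = __va(table);
 *
 * memblock_alloc() returns a physical address and panics on failure (via
 * memblock_alloc_base()), so callers that need a pointer must convert it
 * themselves. Use __memblock_alloc_base() when failure must be survivable.
 */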

/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but the
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage.
 *
 * WARNING: Only available after early_node_map[] has been populated; on
 * some architectures, that is after all the calls to add_active_range()
 * have been made.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc, which really wants us to walk by
	 * addresses and return the nid. This is not very convenient for
	 * early_pfn_map[] users, as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below, which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
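
/*
 * Illustrative usage (hypothetical NUMA-aware caller, not from this file):
 *
 *	phys_addr_t pgdat = memblock_alloc_try_nid(sizeof(pg_data_t),
 *						   SMP_CACHE_BYTES, nid);
 *
 * memblock_alloc_try_nid() prefers memory on @nid but, unlike
 * memblock_alloc_nid(), falls back to any node rather than returning 0,
 * so it only panics (via memblock_alloc_base()) when memory is exhausted
 * everywhere.
 */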

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

/* address just past the highest memory region, i.e. one past the last byte */
phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

/* Binary search for the region in @type containing @addr; the regions
 * array is kept sorted by base. Returns the region index, or -1.
 */
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
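
/*
 * Worked example (illustrative only): with a sorted table of three regions
 * [0x1000-0x1fff] [0x4000-0x5fff] [0x9000-0x9fff], a search for 0x4800
 * probes mid=1 first and returns 1; a search for 0x3000 narrows the
 * interval to empty and returns -1.
 */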

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
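
/*
 * Note the asymmetry between the two region tests above:
 * memblock_is_region_memory() requires [base, base+size) to be fully
 * contained in a single memory region, while memblock_is_region_reserved()
 * is true if the range merely overlaps any reserved region. Illustrative
 * check (hypothetical caller):
 *
 *	if (memblock_is_region_memory(base, size) &&
 *	    !memblock_is_region_reserved(base, size))
 *		... range is RAM and not yet handed out ...
 */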

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hook up the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away
	 * later. This simplifies the memblock_add() code.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
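
/*
 * Expected boot-time call order (illustrative sketch of hypothetical arch
 * setup code, not from this file):
 *
 *	memblock_init();			set up the static arrays
 *	memblock_add(base, size);		once per RAM bank
 *	memblock_reserve(base, size);		once per firmware/kernel range
 *	memblock_analyze();			compute memory_size, allow resize
 *	phys = memblock_alloc(size, align);	allocations from then on
 */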

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
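
/*
 * Passing "memblock=debug" on the kernel command line sets memblock_debug,
 * which enables memblock_dbg() output and the memblock_dump_all() report.
 */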

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && !ARCH_DISCARD_MEMBLOCK */
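
/*
 * With CONFIG_DEBUG_FS enabled, the tables can then be inspected at runtime
 * (illustrative, assuming debugfs is mounted at its usual location):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000000000..0x000000003fffffff
 */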