/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
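
/*
 * Illustrative example (not part of the original source): both helpers
 * assume the alignment is a power of two, since they work by masking.
 * With size = 0x1000 (4 KiB):
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 *
 * A non-power-of-two size would produce a wrong mask, so callers must
 * pass power-of-two alignments.
 */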

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
			       phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
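
/*
 * Illustrative example (not part of the original source): for two
 * regions [0x1000, 0x2000) and [0x2000, 0x3000):
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)  == 0
 *	memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) == 1
 *	memblock_addrs_adjacent(0x2000, 0x1000, 0x1000, 0x1000) == -1
 *
 * i.e. adjacency is reported as +1 when region 2 immediately follows
 * region 1, -1 when it immediately precedes it, and 0 otherwise.
 */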

static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return MEMBLOCK_ERROR;

	base = memblock_align_down((end - size), align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
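
/*
 * Illustrative walk-through (not part of the original source): asking for
 * 0x1000 bytes, 0x1000-aligned, in [0x0, 0x10000) with a reserved region
 * at [0xe000, 0x10000):
 *
 *	1. base = align_down(0x10000 - 0x1000) = 0xf000, which overlaps
 *	   the reservation, so the search drops below it:
 *	2. base = align_down(0xe000 - 0x1000)  = 0xd000, which is free,
 *	   so 0xd000 is returned.
 *
 * The scan always moves downward, one reserved region at a time.
 */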

static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up end to the current accessible limit */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}
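
/*
 * Usage sketch (hypothetical caller, not part of the original source):
 * an early-boot user such as crashkernel placement might do
 *
 *	u64 base = memblock_find_in_range(0, SZ_512M, SZ_16M, SZ_1M);
 *	if (base == MEMBLOCK_ERROR)
 *		... fall back or fail ...
 *
 * Note this only *finds* a range; the caller still has to call
 * memblock_reserve() to claim it.
 */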

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
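
/*
 * Illustrative example (not part of the original source): if the array
 * holds [0x1000, 0x2000) at r1 and [0x2000, 0x3000) at r2, coalescing
 * them leaves a single region [0x1000, 0x3000) and shifts any later
 * entries down by one. The caller is responsible for checking that the
 * two regions are actually adjacent (memblock_regions_adjacent() above).
 */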

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it,
	 * or we use MEMBLOCK for allocations. That means this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * However, this should not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
							     type->regions[i].size,
							     type->regions[i+1].base,
							     type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
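
/*
 * Usage sketch (hypothetical early-boot caller, not part of the original
 * source): an architecture typically registers RAM discovered from the
 * firmware memory map before any allocations are made, e.g.
 *
 *	memblock_init();
 *	memblock_add(0x00000000, 0x40000000);	// first GiB of RAM
 *	memblock_reserve(kernel_start, kernel_size);
 *	memblock_analyze();
 *
 * Adjacent additions are coalesced, so registering RAM banks piecewise
 * still yields a compact region array.
 */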

static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
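
/*
 * Illustrative example (not part of the original source): removing
 * [0x2000, 0x3000) from a region [0x1000, 0x4000) hits the split case:
 * the existing entry is trimmed to [0x1000, 0x2000) and a new entry
 * [0x3000, 0x4000) is added, so one region becomes two around the hole.
 */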

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
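
/*
 * Note the symmetry (not part of the original source): memblock_reserve()
 * adds to the reserved array and memblock_free() removes from it, while
 * memblock_add()/memblock_remove() do the same for the memory array. A
 * hypothetical caller protecting firmware tables might do
 *
 *	memblock_reserve(fw_table_base, fw_table_size);
 *	...
 *	memblock_free(fw_table_base, fw_table_size);
 */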

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
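
/*
 * Usage sketch (hypothetical caller, not part of the original source):
 *
 *	// panics on failure, so no error check is needed
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *va = __va(pa);
 *
 * memblock_alloc() returns a physical address and panics if nothing fits
 * below memblock.current_limit; callers that can fall back should use
 * __memblock_alloc_base(), which returns 0 on failure instead.
 */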

/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * addresses and returns the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
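
/*
 * Usage sketch (hypothetical NUMA caller, not part of the original source):
 *
 *	// prefer node 1, but fall back to any node rather than fail
 *	phys_addr_t pa = memblock_alloc_try_nid(SZ_2M, SZ_2M, 1);
 *
 * memblock_alloc_nid() returns 0 when no memory with the right nid fits,
 * and memblock_alloc_try_nid() then retries anywhere (and panics via
 * memblock_alloc_base() if even that fails).
 */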

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
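
/*
 * Illustrative example (not part of the original source): with memory
 * regions [0, 1G) and [2G, 3G) and memory_limit = 1.5G, the first loop
 * keeps [0, 1G) intact (limit drops to 0.5G) and truncates the second
 * region to [2G, 2.5G), discarding everything after it; reservations
 * above the new end of DRAM are then trimmed or dropped the same way.
 */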

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
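
/*
 * Illustrative walk-through (not part of the original source): searching
 * for 0x5000 in a sorted array { [0x0, 0x1000), [0x4000, 0x6000),
 * [0x8000, 0x9000) }:
 *
 *	left = 0, right = 3 -> mid = 1, and 0x4000 <= 0x5000 < 0x6000,
 *	so index 1 is returned on the first probe.
 *
 * This binary search relies on memblock_add_region() keeping the array
 * sorted by base address and non-overlapping.
 */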

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the empty-array case in memblock_add_region().
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
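
/*
 * Typical boot-time call order (illustrative, not part of the original
 * source):
 *
 *	memblock_init();		// hook up the static arrays
 *	memblock_add(...);		// register RAM from the firmware map
 *	memblock_reserve(...);		// protect kernel, initrd, DT, etc.
 *	memblock_analyze();		// compute memory_size, allow resizing
 *	... memblock_alloc(...) ...	// early allocations
 *
 * memblock_init() is idempotent thanks to the init_done guard, so an
 * architecture may safely call it from more than one early path.
 */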

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
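
/*
 * Illustrative example (not part of the original source): booting with
 * "memblock=debug" on the kernel command line sets memblock_debug, which
 * enables the memblock_dbg() messages and the memblock_dump_all() output
 * above.
 */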

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);
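
/*
 * Illustrative output (hypothetical values, not part of the original
 * source): with debugfs mounted at /sys/kernel/debug, reading the file
 * created above might show
 *
 *	$ cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000000000..0x000000003fffffff
 *
 * one "index: start..end" line per region, matching the seq_printf()
 * format in memblock_debug_show().
 */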

#endif /* CONFIG_DEBUG_FS */
843