/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
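
/*
 * Worked example (illustrative, not from the original source): with a
 * 64-bit phys_addr_t, capping base = 0xffffffffffffff00 with
 * *size = 0x200 clamps *size to 0xff, so base + *size lands exactly on
 * ULLONG_MAX instead of wrapping around zero.
 */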

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

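/*
 * Example (illustrative): [0x1000, 0x3000) and [0x2000, 0x4000) overlap
 * because 0x1000 < 0x4000 and 0x2000 < 0x3000; merely touching ranges
 * such as [0x1000, 0x2000) and [0x2000, 0x3000) do not.
 */
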
static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image; otherwise it is trimmed up to that point.
 * The reason is that we want bottom-up allocations to land just above the
 * kernel image, so that the allocated memory is highly likely to reside
 * on the same node as the kernel itself.
 *
 * If the bottom-up allocation fails, allocation is retried top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	int ret;
	phys_addr_t kernel_end;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have this limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * it ever does.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(start, end, size, align,
					   MAX_NUMNODES);
}

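/*
 * Illustrative (hypothetical) pairing of the two calls above: an early
 * boot user that wants a 1MiB scratch buffer below 4GiB might do the
 * following (SZ_1M comes from <linux/sizes.h>):
 *
 *	phys_addr_t buf;
 *
 *	buf = memblock_find_in_range(0, 1ULL << 32, SZ_1M, PAGE_SIZE);
 *	if (buf)
 *		memblock_reserve(buf, SZ_1M);
 */
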
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/*
	 * Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it,
	 * or we use MEMBLOCK for allocations. That means that this is
	 * unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case
	 * yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free the old array. We needn't free it if it is the static one. */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock. Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward the regions from next + 1 (i.e. index i + 2) */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}

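/*
 * Illustrative (hypothetical) usage from an arch's early setup code;
 * bank_base/bank_size and the initrd bounds are made-up names standing
 * in for whatever the platform discovered:
 *
 *	memblock_add(bank_base, bank_size);
 *	memblock_reserve(initrd_start, initrd_end - initrd_start);
 */
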
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

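/*
 * Worked example (illustrative): if @type holds the single region [0, 100)
 * and we isolate [30, 70), the array becomes [0, 30), [30, 70), [70, 100),
 * with *@start_rgn = 1 and *@end_rgn = 2.
 */
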
static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32 bits
 * of *@idx contain the index into the memory regions array and the upper
 * 32 bits index the areas before each reserved region.  For example, if
 * reserved regions look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32 bits index the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

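/*
 * Illustrative (hypothetical) caller of the iterator above, printing
 * every free range on every node via the for_each_free_mem_range()
 * wrapper from <linux/memblock.h>:
 *
 *	phys_addr_t start, end;
 *	int nid;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, &nid)
 *		pr_info("free: [%#llx-%#llx) on node %d\n",
 *			(u64)start, (u64)end, nid);
 */
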
/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

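/*
 * Illustrative (hypothetical) walker built on the iterator above, via
 * for_each_mem_pfn_range() from <linux/memblock.h>:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lu-%lu)\n", nid, start_pfn, end_pfn);
 */
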
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	if (WARN_ON(!align))
		align = __alignof__(long long);

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

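/*
 * Illustrative (hypothetical) usage: grabbing one page for an early page
 * table before the page allocator is up. memblock_alloc() returns a
 * physical address and panics on failure, so no error path is needed:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *pgt = __va(pa);
 *
 *	memset(pgt, 0, PAGE_SIZE);
 */
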
/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return (phys_addr_t)pages << PAGE_SHIFT;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

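/*
 * Worked example (illustrative): with a single memory region [0, 1G) and
 * limit = 512M (e.g. from a "mem=512M" boot option on architectures that
 * route it here), max_addr becomes 512M and everything at or above it is
 * removed from both the memory and reserved arrays.
 */
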
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

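/*
 * Illustrative trace of the binary search above: looking up addr = 40 in
 * regions 0:[0, 16), 1:[32, 48), 2:[128, 130) starts with left = 0 and
 * right = 3, probes mid = 1, sees 32 <= 40 < 48, and returns 1.
 */
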
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);

	if (mid == -1)
		return -1;

	*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
	*end_pfn = (type->regions[mid].base + type->regions[mid].size)
			>> PAGE_SHIFT;

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	int i;
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_type *mem = &memblock.memory;

	for (i = 0; i < mem->cnt; i++) {
		orig_start = mem->regions[i].base;
		orig_end = mem->regions[i].base + mem->regions[i].size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			mem->regions[i].base = start;
			mem->regions[i].size = end - start;
		} else {
			memblock_remove_region(mem, i);
			i--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

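/*
 * Usage note: booting with "memblock=debug" on the kernel command line
 * sets memblock_debug, which turns on the memblock_dbg() tracing used
 * throughout this file.
 */
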
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
1197