/* xref: /openbmc/linux/mm/memblock.c (revision e5c86679) */
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
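
/*
 * Worked example (illustrative, assuming a 64-bit phys_addr_t): for
 * base = 0xfffffffffffff000 and *size = 0x2000, base + *size would wrap
 * around, so the size is capped to ULLONG_MAX - base = 0xfff. After
 * capping, base + *size equals ULLONG_MAX and no overflow can occur.
 */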

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
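
/*
 * Example (illustrative): the half-open ranges [0x1000, 0x3000) and
 * [0x2000, 0x4000) overlap because 0x1000 < 0x4000 and 0x2000 < 0x3000,
 * while [0x1000, 0x2000) and [0x2000, 0x3000) merely touch and are not
 * considered overlapping by memblock_addrs_overlap().
 */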

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
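
/*
 * Example usage (illustrative sketch; the size and alignment are
 * hypothetical):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(0x1000, 0x1000, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE,
 *					   choose_memblock_flags());
 *	if (addr)
 *		memblock_reserve(addr, 0x1000);
 *
 * Note that finding and reserving are separate steps; see
 * memblock_alloc_range_nid() below for the combined helper.
 */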

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
			type->name, type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
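
/*
 * Note (illustrative): with the default INIT_MEMBLOCK_REGIONS of 128, the
 * first doubling grows the array to 256 entries. Both the old and the new
 * sizes are rounded up to PAGE_SIZE above so that a memblock-backed old
 * array can later be freed completely via memblock_free().
 */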

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
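
/*
 * Example (illustrative): two adjacent regions [0x1000, 0x2000) and
 * [0x2000, 0x3000) with the same node id and flags are merged into a
 * single region [0x1000, 0x3000); if their flags differ (say one is
 * MEMBLOCK_MIRROR and the other is not), both are kept as-is.
 */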

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
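
/*
 * Example (illustrative walk-through of the two-pass scheme above): with
 * an existing region [0x2000, 0x3000), adding [0x1000, 0x4000) first
 * counts two new regions - [0x1000, 0x2000) below the existing one and
 * [0x3000, 0x4000) above it - then inserts them on the second pass, and
 * memblock_merge_regions() finally collapses all three into
 * [0x1000, 0x4000) when node ids and flags match.
 */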

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
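
/*
 * Example (illustrative): isolating [0x2000, 0x3000) out of a single
 * region [0x1000, 0x4000) splits it into [0x1000, 0x2000),
 * [0x2000, 0x3000) and [0x3000, 0x4000); *start_rgn and *end_rgn then
 * delimit just the middle region, so callers such as
 * memblock_remove_range() or memblock_setclr_flag() can operate on the
 * requested range without touching memory outside it.
 */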

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on the given memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to set or clear
 *
 * This function isolates region [@base, @base + @size), and sets/clears @flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
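
/*
 * Typical usage (illustrative sketch; the loop body is hypothetical):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 *
 * Each iteration yields one intersection of a memory region with the
 * gaps between reserved regions, using the split 32/32 bit cursor
 * described above.
 */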

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
						      unsigned long max_pfn)
{
	struct memblock_type *type = &memblock.memory;
	unsigned int right = type->cnt;
	unsigned int mid, left = 0;
	phys_addr_t addr = PFN_PHYS(pfn + 1);

	do {
		mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else {
			/* addr is within the region, so pfn + 1 is valid */
			return min(pfn + 1, max_pfn);
		}
	} while (left < right);

	if (right == type->cnt)
		return max_pfn;
	else
		return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
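
/*
 * Example usage (illustrative sketch; the size and node are hypothetical):
 *
 *	void *table;
 *
 *	table = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 0,
 *					    BOOTMEM_ALLOC_ACCESSIBLE, 0);
 *
 * The returned memory is zeroed and node-local to node 0 when possible;
 * the call panics instead of returning NULL if nothing can be found.
 */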

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator; no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory @limit size into the max address within one of
	 * the memory memblock regions. If @limit exceeds the total size of
	 * those regions, max_addr keeps its original value of ULLONG_MAX.
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
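
/*
 * Worked example (illustrative): with memory regions [0, 2G) and
 * [4G, 6G), a limit of 3G is satisfied by 2G from the first region plus
 * 1G from the second, so __find_max_addr() returns 4G + 1G = 5G.
 */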

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t max_addr;
	int i, ret, start_rgn, end_rgn;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX,
				&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions above the limit */
	for (i = end_rgn - 1; i >= start_rgn; i--) {
		if (!memblock_is_nomap(&type->regions[i]))
			memblock_remove_region(type, i);
	}
	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
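
/*
 * Usage note: passing "memblock=debug" on the kernel command line sets
 * memblock_debug above, which turns the memblock_dbg() calls in this
 * file into printed info-level messages.
 */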

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */