// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() function performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
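
/*
 * A minimal usage sketch (illustrative only; the call sites and values
 * are hypothetical and architecture specific). An early setup path
 * typically registers RAM, protects firmware ranges, then allocates:
 *
 *	memblock_add(base, size);			// RAM from firmware
 *	memblock_reserve(initrd_start, initrd_size);	// keep initrd intact
 *	buf = memblock_alloc(buf_size, SMP_CACHE_BYTES);// zeroed, virtual
 *
 * memblock_alloc() and friends are declared in <linux/memblock.h>.
 */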

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)
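
/*
 * memblock_debug below is switched on from the kernel command line; in
 * mainline this is the "memblock=debug" early parameter (the handler is
 * not part of this excerpt). With it set, every memblock_dbg() trace
 * above becomes visible in the boot log.
 */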

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
		       phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
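
/*
 * Worked example (hypothetical numbers): given one free range
 * [0x1000, 0x9000), size = 0x2000 and align = 0x1000, the bottom-up
 * helper returns the lowest fitting candidate 0x1000, while the
 * top-down helper returns round_down(0x9000 - 0x2000, 0x1000) = 0x7000.
 * The direction is selected with memblock_set_bottom_up().
 */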

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 *
 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
		end_rgn--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}

	/*
	 * The worst case is when the new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there are enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;
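
	/*
	 * Numeric illustration of the bound above (hypothetical layout):
	 * with type->cnt == 2 regions [10, 20) and [30, 40), adding [0, 50)
	 * inserts the three uncovered pieces [0, 10), [20, 30) and [40, 50),
	 * i.e. 2 + 3 == 2 * 2 + 1 entries before the final merge.
	 */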

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_validate_numa_coverage - check if the amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal memory size that can have unassigned node
 * ID (in bytes).
 *
 * Buggy firmware may report memory that does not belong to any node.
 * Check if the amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* calculate lost pages */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (!numa_valid_node(nid))
			nr_pages += end_pfn - start_pfn;
	}

	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
		mem_size_mb = memblock_phys_mem_size() >> 20;
		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
		return false;
	}

	return true;
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
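
/*
 * Worked example for memblock_isolate_range() (hypothetical layout):
 * with a single region [0, 100) in @type, isolating base = 30,
 * size = 30 splits it into [0, 30), [30, 60) and [60, 100) and reports
 * *start_rgn = 1, *end_rgn = 2, so callers can iterate over exactly
 * the fully contained piece [30, 60).
 */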

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears the flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (numa_valid_node(nid) && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
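
/*
 * Iteration sketch for __next_mem_range() (hypothetical layout): if
 * type_a holds one memory region [0, 100) and type_b holds the
 * reservations 0:[0-16) and 1:[32-48), successive calls yield the free
 * intersections [16, 32) and [48, 100), packing the two array indices
 * into *idx as (u32)idx_a | (u64)idx_b << 32 between calls.
 */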

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (!numa_valid_node(nid) || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control whether the allocation can fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (numa_valid_node(nid) && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, found + size);

	return found;
}
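
/*
 * Fallback order of memblock_alloc_range_nid(), as implemented above:
 *
 *   1. the requested node, restricted by the current flags
 *      (mirrored regions only when mirroring is in use)
 *   2. any node, same flags (skipped when @exact_nid)
 *   3. the requested node again after dropping MEMBLOCK_MIRROR
 *   4. any node without MEMBLOCK_MIRROR
 *
 * Each attempt is a memblock_find_in_range_node() + memblock_reserve()
 * pair; the first one that succeeds wins.
 */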
1480
1481 /**
1482 * memblock_phys_alloc_range - allocate a memory block inside specified range
1483 * @size: size of memory block to be allocated in bytes
1484 * @align: alignment of the region and block's size
1485 * @start: the lower bound of the memory region to allocate (physical address)
1486 * @end: the upper bound of the memory region to allocate (physical address)
1487 *
1488 * Allocate @size bytes in the between @start and @end.
1489 *
1490 * Return: physical address of the allocated memory block on success,
1491 * %0 on failure.
1492 */
memblock_phys_alloc_range(phys_addr_t size,phys_addr_t align,phys_addr_t start,phys_addr_t end)1493 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1494 phys_addr_t align,
1495 phys_addr_t start,
1496 phys_addr_t end)
1497 {
1498 memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1499 __func__, (u64)size, (u64)align, &start, &end,
1500 (void *)_RET_IP_);
1501 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1502 false);
1503 }
1504
1505 /**
1506 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1507 * @size: size of memory block to be allocated in bytes
1508 * @align: alignment of the region and block's size
1509 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1510 *
1511 * Allocates memory block from the specified NUMA node. If the node
1512 * has no available memory, attempts to allocated from any node in the
1513 * system.
1514 *
1515 * Return: physical address of the allocated memory block on success,
1516 * %0 on failure.
1517 */
memblock_phys_alloc_try_nid(phys_addr_t size,phys_addr_t align,int nid)1518 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1519 {
1520 return memblock_alloc_range_nid(size, align, 0,
1521 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1522 }
1523
1524 /**
1525 * memblock_alloc_internal - allocate boot memory block
1526 * @size: size of memory block to be allocated in bytes
1527 * @align: alignment of the region and block's size
1528 * @min_addr: the lower bound of the memory region to allocate (phys address)
1529 * @max_addr: the upper bound of the memory region to allocate (phys address)
1530 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1531 * @exact_nid: control the allocation fall back to other nodes
1532 *
1533 * Allocates memory block using memblock_alloc_range_nid() and
1534 * converts the returned physical address to virtual.
1535 *
1536 * The @min_addr limit is dropped if it can not be satisfied and the allocation
1537 * will fall back to memory below @min_addr. Other constraints, such
1538 * as node and mirrored memory will be handled again in
1539 * memblock_alloc_range_nid().
1540 *
1541 * Return:
1542 * Virtual address of allocated memory block on success, NULL on failure.
1543 */
memblock_alloc_internal(phys_addr_t size,phys_addr_t align,phys_addr_t min_addr,phys_addr_t max_addr,int nid,bool exact_nid)1544 static void * __init memblock_alloc_internal(
1545 phys_addr_t size, phys_addr_t align,
1546 phys_addr_t min_addr, phys_addr_t max_addr,
1547 int nid, bool exact_nid)
1548 {
1549 phys_addr_t alloc;
1550
1551 /*
1552 * Detect any accidental use of these APIs after slab is ready, as at
1553 * this moment memblock may be deinitialized already and its
1554 * internal data may be destroyed (after execution of memblock_free_all)
1555 */
1556 if (WARN_ON_ONCE(slab_is_available()))
1557 return kzalloc_node(size, GFP_NOWAIT, nid);
1558
1559 if (max_addr > memblock.current_limit)
1560 max_addr = memblock.current_limit;
1561
1562 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1563 exact_nid);
1564
1565 /* retry allocation without lower limit */
1566 if (!alloc && min_addr)
1567 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1568 exact_nid);
1569
1570 if (!alloc)
1571 return NULL;
1572
1573 return phys_to_virt(alloc);
1574 }
1575
1576 /**
1577 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1578 * without zeroing memory
1579 * @size: size of memory block to be allocated in bytes
1580 * @align: alignment of the region and block's size
1581 * @min_addr: the lower bound of the memory region from where the allocation
1582 * is preferred (phys address)
1583 * @max_addr: the upper bound of the memory region from where the allocation
1584 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1585 * allocate only from memory limited by memblock.current_limit value
1586 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1587 *
1588 * Public function, provides additional debug information (including caller
1589 * info), if enabled. Does not zero allocated memory.
1590 *
1591 * Return:
1592 * Virtual address of allocated memory block on success, NULL on failure.
1593 */
memblock_alloc_exact_nid_raw(phys_addr_t size,phys_addr_t align,phys_addr_t min_addr,phys_addr_t max_addr,int nid)1594 void * __init memblock_alloc_exact_nid_raw(
1595 phys_addr_t size, phys_addr_t align,
1596 phys_addr_t min_addr, phys_addr_t max_addr,
1597 int nid)
1598 {
1599 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1600 __func__, (u64)size, (u64)align, nid, &min_addr,
1601 &max_addr, (void *)_RET_IP_);
1602
1603 return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
1604 true);
1605 }

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}
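
/*
 * Illustrative sketch (not part of this file): the raw variant suits
 * callers that overwrite the whole block anyway, e.g. a hypothetical
 * early table filled immediately after allocation:
 *
 *	ent = memblock_alloc_try_nid_raw(nr * sizeof(*ent), PAGE_SIZE, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE,
 *					 NUMA_NO_NODE);
 *	if (ent)
 *		for (i = 0; i < nr; i++)
 *			ent[i] = EMPTY_ENTRY;	(hypothetical initializer)
 */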

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
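
/*
 * Illustrative sketch (not part of this file): the common pattern for a
 * zeroed early allocation; the table and its size are hypothetical.  The
 * memblock_alloc() convenience wrapper in memblock.h expands to this call
 * with min_addr = 0 and nid = NUMA_NO_NODE.
 *
 *	tbl = memblock_alloc_try_nid(nr * sizeof(*tbl), SMP_CACHE_BYTES,
 *				     0, MEMBLOCK_ALLOC_ACCESSIBLE,
 *				     NUMA_NO_NODE);
 *	if (!tbl)
 *		panic("%s: failed to allocate table\n", __func__);
 */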

/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
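
/*
 * Illustrative sketch (not part of this file): releasing a hypothetical
 * firmware blob once the buddy allocator is up.  Note that the PFN_UP and
 * PFN_DOWN rounding above means only pages fully contained in
 * [base, base + size) are freed.
 *
 *	memblock_free_late(__pa(blob), blob_size);
 */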

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory @limit size into the max address within one of
	 * the memory memblock regions.  If @limit exceeds the total size of
	 * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
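
/*
 * Worked example for __find_max_addr() (hypothetical layout): with memory
 * regions [0, 1G) and [4G, 5G), a limit of 1.5G consumes all of the first
 * region plus 0.5G of the second, so 4G + 0.5G is returned.  A limit of
 * 3G exceeds the 2G total and PHYS_ADDR_MAX is returned unchanged.
 */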

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}
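
/*
 * Binary search for the region of @type that contains @addr.  The region
 * array is kept sorted by base and regions never overlap, so at most one
 * region can match.  Returns the region index, or -1 if @addr falls into
 * a hole.
 */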
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * %true if the region is fully contained in a memory block, %false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
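
/*
 * Worked example for memblock_trim_memory() (hypothetical layout): with a
 * 2M alignment, a region [1M, 7M) is trimmed to [2M, 6M); a region smaller
 * than the alignment, e.g. [1M, 2M), rounds to an empty range and is
 * removed outright.
 */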

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (numa_valid_node(memblock_get_region_node(rgn)))
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
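
/*
 * Usage note: booting with "memblock=debug" on the kernel command line
 * sets memblock_debug, which enables the memblock_dbg() traces in the
 * allocation paths above and the full dump from memblock_dump_all().
 */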

static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in the VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock.
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in the VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock.
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_ORDER-aligned, set order to MAX_ORDER for the case.
		 */
		if (start)
			order = min_t(int, MAX_ORDER, __ffs(start));
		else
			order = MAX_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
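
/*
 * Worked example for __free_pages_memory(): freeing pfns [0x3, 0x10)
 * proceeds in chunks of 1, 4 and 8 pages.  __ffs(0x3) = 0, so one page is
 * freed at 0x3; then 0x4 permits order 2 and 0x8 permits order 3, each
 * chunk both aligned to its order and bounded by the remaining range.
 */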

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, end, &memblock.reserved, nid);
	}

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (!numa_valid_node(nid))
			nid = early_pfn_to_nid(PFN_DOWN(start));

		reserve_bootmem_region(start, end, nid);
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when node 0 has no RAM installed,
	 * the lowest memory will be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}
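
/*
 * Usage note: memblock_free_all() runs once near the end of early boot
 * (typically reached via the architecture's mem_init()).  Afterwards the
 * buddy allocator owns all free memory and the memblock allocation APIs
 * above only WARN and fall back to the slab allocator.
 */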

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
static const char * const flagname[] = {
	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
};

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i, j, nid;
	unsigned int count = ARRAY_SIZE(flagname);
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (numa_valid_node(nid))
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
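
/*
 * Illustrative output (hypothetical values) from
 * /sys/kernel/debug/memblock/memory on a 64-bit NUMA kernel, matching the
 * seq_printf() format above:
 *
 *	   0: 0x0000000080000000..0x00000000bfffffff    0 NONE
 *	   1: 0x0000000100000000..0x000000017fffffff    1 NONE
 */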
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */