xref: /openbmc/linux/include/linux/memblock.h (revision b5bf39cd)
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED = 0x8,	/* always detected via a driver */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: %true if memory is allocated in the bottom-up direction
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
		      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				     phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type to exclude from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
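
/*
 * Illustrative sketch (not part of the original header): walking the
 * firmware-described physical memory map on architectures that select
 * CONFIG_HAVE_MEMBLOCK_PHYS_MAP. Variable names here are hypothetical.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_physmem_range(i, NULL, &start, &end)
 *		pr_info("physmem: [%pa-%pa)\n", &start, &end);
 */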
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b; iterate over just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type to exclude from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b; iterate over just type_a if
 * type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type to exclude from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
			     p_start, p_end, NULL)
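
/*
 * Minimal usage sketch (illustrative, hypothetical variables): print every
 * usable memory range known to memblock; the reported end is exclusive.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory: [%pa-%pa)\n", &start, &end);
 */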

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
				 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)
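
/*
 * Illustrative sketch (hypothetical code): summing up everything currently
 * reserved via memblock_reserve() and friends. The same total is also
 * available from memblock_reserved_size().
 *
 *	u64 i;
 *	phys_addr_t start, end, total = 0;
 *
 *	for_each_reserved_mem_range(i, &start, &end)
 *		total += end - start;
 */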

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
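
/*
 * Illustrative sketch (hypothetical code): report every early memory range
 * together with the node it belongs to.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */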

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
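
/*
 * Illustrative sketch of how a deferred struct page initializer might use
 * this iterator; my_init_range() is a hypothetical helper and @zone is
 * assumed to already have its start, end and pgdat set up.
 *
 *	u64 i;
 *	unsigned long spfn, epfn, nr_init = 0;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		nr_init += my_init_range(spfn, epfn);
 */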

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)
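
/*
 * Illustrative sketch (hypothetical code): find the first free range on any
 * node that can hold @size bytes, roughly what the early allocators do.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		if (end - start >= size)
 *			return start;
 */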

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				      phys_addr_t align, phys_addr_t start,
				      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
						       phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
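
/*
 * Minimal sketch of the physical allocation API (illustrative only): grab a
 * page-aligned scratch page during early boot and hand it back afterwards.
 * memblock_phys_alloc() returns 0 on failure.
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("cannot allocate early scratch page\n");
 *	...
 *	memblock_phys_free(pa, PAGE_SIZE);
 */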

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
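
/*
 * Illustrative sketch ('nr' and 'struct foo' are hypothetical):
 * memblock_alloc() returns zeroed, directly mapped memory, or %NULL on
 * failure; the _raw variants skip the zeroing.
 *
 *	struct foo *table;
 *
 *	table = memblock_alloc(nr * sizeof(*table), SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate boot table\n", __func__);
 */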

static inline void *memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock allocates memory in the bottom-up
 * direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
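
/*
 * Illustrative sketch (hypothetical code): temporarily switch to bottom-up
 * mode so that an early allocation is served from the lowest suitable
 * addresses, then restore the default top-down behaviour.
 *
 *	memblock_set_bottom_up(true);
 *	buf = memblock_alloc(size, SMP_CACHE_BYTES);
 *	memblock_set_bottom_up(false);
 */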

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
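
/*
 * Worked example (assuming 4K pages): for a region spanning
 * [0x1000800, 0x1003800), the reserved helpers round outwards and return
 * pfns 0x1000 (PFN_DOWN of the base) and 0x1004 (PFN_UP of the end), so
 * every partially covered page is included, while the memory helpers round
 * inwards and return 0x1001 and 0x1003, covering only whole pages.
 */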

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)
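
/*
 * Illustrative sketch (hypothetical code): combining the region iterator
 * with the flag and pfn helpers above to report hotpluggable ranges.
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r) {
 *		if (!memblock_is_hotpluggable(r))
 *			continue;
 *		pr_info("hotpluggable pfns: [%lx-%lx)\n",
 *			memblock_region_memory_base_pfn(r),
 *			memblock_region_memory_end_pfn(r));
 *	}
 */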

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_ZERO	0x00000002	/* Zero allocated hash table */
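
/*
 * Sketch of how a subsystem might size an early hash table with
 * alloc_large_system_hash() (hypothetical table and shift variable;
 * numentries == 0 lets the helper size the table from the amount of memory,
 * 14 is the scale factor; see the dentry and inode caches for real users):
 *
 *	my_table = alloc_large_system_hash("my-cache",
 *					   sizeof(struct hlist_head),
 *					   0, 14, HASH_EARLY | HASH_ZERO,
 *					   &my_hash_shift, NULL, 0, 0);
 */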

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
void early_memtest(phys_addr_t start, phys_addr_t end);
void memtest_report_meminfo(struct seq_file *m);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif


#endif /* _LINUX_MEMBLOCK_H */