xref: /openbmc/linux/mm/sparse-vmemmap.c (revision 1d9cfee7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives such as pfn_to_page(),
 * page_to_pfn(), virt_to_page() and page_address() to be implemented
 * as a pure base-plus-offset calculation, without touching memory.
 *
 * However, virtual mappings need a page table and TLB entries. Many
 * Linux architectures already map their physical space using 1:1
 * mappings via TLBs. For those arches the virtual memory map comes
 * essentially for free if we use the same page size as the 1:1
 * mappings. In that case the overhead consists of a few additional
 * pages that are allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
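
/*
 * For illustration, with a virtually contiguous memmap the conversions
 * collapse to pointer arithmetic, in the style of the generic
 * CONFIG_SPARSEMEM_VMEMMAP definitions in
 * include/asm-generic/memory_model.h:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where each architecture provides the vmemmap base address (e.g.
 * "#define vmemmap ((struct page *)VMEMMAP_START)" on x86-64).
 */
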
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else memblock.
 */

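/*
 * Note that the "raw" memblock variant used below returns memory that
 * is not zeroed; callers that need cleared pages must zero the result
 * themselves, as vmemmap_alloc_block_zero() does.
 */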
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to memblock. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Early-stage callers must all request the same size. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
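
/*
 * The buffer behind sparse_buffer_alloc() is the per-node scratch area
 * that mm/sparse.c preallocates with sparse_buffer_init() before
 * populating a node's sections and releases again with
 * sparse_buffer_fini(); chunks are carved from it aligned to their own
 * size, and once it is exhausted the fallback above takes over.
 */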

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
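
/*
 * The two helpers above treat the device range as a simple bump
 * allocator over pfns:
 *
 *	base_pfn    base_pfn + reserve              base_pfn + reserve + free
 *	|  reserved  |  alloc + align (handed out)  |  still available  |
 *
 * vmem_altmap_next_pfn() returns the first pfn that has never been
 * handed out; vmem_altmap_nr_free() returns how many pfns remain.
 */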

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @size:	size (in bytes) of the allocation
 * @altmap:	device page map
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
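
/*
 * Worked example: a 2MB request with 4K pages gives nr_pfns = 512, and
 * find_first_bit() picks its lowest set bit (bit 9), so the allocation
 * is naturally aligned to its own size. If the next free device pfn is
 * 0x1040, ALIGN(0x1040, 512) = 0x1200, so 0x1c0 pfns are skipped as
 * padding (charged to altmap->align) and the caller's block starts at
 * pfn 0x1200.
 */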

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (altmap)
			p = altmap_alloc_block_buf(PAGE_SIZE, altmap);
		else
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
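
/*
 * For illustration: an architecture that maps the memmap with base
 * pages only can implement its vmemmap_populate() hook as a thin
 * wrapper around the helper above (much as arm64 does when it is not
 * using section mappings for the vmemmap):
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */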

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;

	/*
	 * The minimum granularity of memmap extensions is
	 * PAGES_PER_SUBSECTION as allocations are tracked in the
	 * 'subsection_map' bitmap of the section.
	 */
	end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
	pfn &= PAGE_SUBSECTION_MASK;
	nr_pages = end - pfn;

	start = (unsigned long) pfn_to_page(pfn);
	end = start + nr_pages * sizeof(struct page);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
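
/*
 * Worked example with 4K pages, where PAGES_PER_SUBSECTION is 512: a
 * request for pfn 0x1100, nr_pages 100 is widened to whole subsections,
 * so end = ALIGN(0x1164, 512) = 0x1200 and the masked pfn becomes
 * 0x1000; the memmap is therefore populated for all 512 pages of the
 * enclosing subsection even though fewer were requested.
 */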