xref: /openbmc/linux/mm/sparse-vmemmap.c (revision 6f52b16c5b29b89d92c0e7236f4655dc8491ad70)
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow the VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a simple base plus
 * offset calculation without touching memory.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
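/*
 * For illustration only (a sketch, not part of the original file): with a
 * virtually contiguous array of struct page, the generic memory model can
 * reduce the pfn <-> page conversions to plain pointer arithmetic, roughly:
 *
 *	// vmemmap is an arch-provided pointer to the start of the map,
 *	// e.g. something like: #define vmemmap ((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */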
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					    BOOTMEM_ALLOC_ACCESSIBLE, node);
}

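/*
 * Early-boot buffer carved out by sparse_mem_maps_populate_node().  While it
 * is set, alloc_block_buf() hands out pieces of it instead of going to the
 * allocators for every single page.
 */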
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, otherwise fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		page = alloc_pages_node(node,
			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
			get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	}

	return __earlyonly_bootmem_alloc(node, size, size,
			__pa(MAX_DMA_ADDRESS));
}

/*
 * During the early boot stage all callers are expected to request the same
 * size, so the preallocated buffer can simply be carved up in order.
 */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

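/*
 * Device altmap accounting: the reservation starts at altmap->base_pfn, the
 * first altmap->reserve pfns are off limits, and altmap->alloc + altmap->align
 * pfns have already been handed out of the altmap->free pfns set aside for
 * vmemmap storage.
 */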
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request (more precisely, to the
 * largest power of two that divides @nr_pfns).
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}

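/*
 * Back a page-multiple vmemmap allocation with pfns taken from the device
 * altmap reservation instead of regular memory.
 */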
static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/*
 * As with alloc_block_buf(), early-stage callers are expected to request the
 * same size for every allocation.
 */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}

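/*
 * Warn if the memory backing the page structures for [start, end) ended up
 * on a node that is not local to the node the memory belongs to.
 */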
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

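/*
 * The vmemmap_*_populate() helpers below each fill in one page-table level
 * for a vmemmap address, allocating the needed memory on the given node, and
 * return NULL on allocation failure.  The PTE level maps an actual vmemmap
 * data page (taken from the early buffer when one is available); the higher
 * levels allocate page-table pages.
 */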
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

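/*
 * Populate the vmemmap range [start, end) with base (PAGE_SIZE) pages by
 * instantiating every missing page-table level.  Architectures that do not
 * use huge-page mappings for the vmemmap can call this directly from their
 * vmemmap_populate().
 */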
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

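/*
 * Build the struct page array for section pnum by asking the architecture to
 * populate the corresponding vmemmap range on node nid.
 */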
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

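/*
 * Populate the memory map for every present section in [pnum_begin, pnum_end).
 * A single PMD-aligned bootmem buffer covering all map_count sections is
 * allocated up front so the architecture can back the vmemmap with huge-page
 * mappings; whatever remains unused is handed back to memblock at the end.
 */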
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free whatever is left of the preallocated buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}