// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
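
/*
 * For example (a sketch, assuming an architecture that selects
 * CONFIG_SPARSEMEM_VMEMMAP and places the virtually mapped memmap at
 * VMEMMAP_START), the pfn/page conversions reduce to pointer arithmetic
 * against a fixed virtual base:
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
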
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			after remapping, or that the range is remapped from
 *			when the mapping is restored.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the page table walk, before we
	 * start remapping (i.e. before calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++)
		walk->remap_pte(pte, addr, walk);
}

static void vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		BUG_ON(pmd_leaf(*pmd));

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);
}

static void vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		vmemmap_pmd_range(pud, addr, next, walk);
	} while (pud++, addr = next, addr != end);
}

static void vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		vmemmap_pud_range(p4d, addr, next, walk);
	} while (p4d++, addr = next, addr != end);
}

static void vmemmap_remap_range(unsigned long start, unsigned long end,
				struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		vmemmap_p4d_range(pgd, addr, next, walk);
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Note: This function depends on vmemmap being base page mapped. Please make
 * sure that the PMD mapping of vmemmap pages is disabled before calling this
 * function.
 */
void vmemmap_remap_free(unsigned long start, unsigned long end,
			unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for huge pages,
	 * the vmemmap page table walking routine obeys the following rules
	 * (see more details in vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	vmemmap_remap_range(reuse, end, &walk);
	free_vmemmap_page_list(&vmemmap_pages);
}
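
/*
 * A usage sketch (modelled on the HugeTLB vmemmap optimization; the names
 * below are illustrative, not definitions from this file): the caller keeps
 * the reserved head vmemmap page(s), reuses the page just before @start and
 * frees everything mapped by [@start, @end):
 *
 *	vmemmap_addr  = (unsigned long)head + RESERVE_VMEMMAP_SIZE;
 *	vmemmap_end   = vmemmap_addr + free_vmemmap_pages_size;
 *	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
 *	vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse);
 */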

static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to freshly allocated pages, one page per PTE in the
 *			 range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flags for allocating vmemmap pages.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	vmemmap_remap_range(reuse, end, &walk);

	return 0;
}
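
/*
 * A sketch of the restore path, mirroring the vmemmap_remap_free() example
 * above (names and GFP flags are illustrative of how a HugeTLB-style caller
 * might use it, not requirements of this interface):
 *
 *	if (vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
 *				GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE))
 *		return -ENOMEM;
 */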

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure the size is the same across calls during the early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
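
/*
 * A worked example with illustrative numbers (4 KiB base pages): with
 * base_pfn = 0x1000, reserve = 0, alloc = 3 and align = 0, the next free pfn
 * is 0x1003.  A 16 KiB request covers nr_pfns = 4 pages and must be naturally
 * aligned to 4 pages, so one pfn of padding is charged to @align, the
 * allocation starts at pfn 0x1004, and afterwards alloc = 7 and align = 1.
 */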

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
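
/*
 * An architecture that maps its memmap with base pages can implement the
 * vmemmap_populate() hook as a thin wrapper around the helper above (a
 * sketch, not any particular architecture's implementation):
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */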

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
533