/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
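
/*
 * For reference: with CONFIG_SPARSEMEM_VMEMMAP the generic memory model
 * (include/asm-generic/memory_model.h) implements these primitives as
 * pointer arithmetic against the fixed vmemmap base, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */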
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					    BOOTMEM_ALLOC_ACCESSIBLE, node);
}

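/*
 * Per-node scratch buffer for early-boot memmap allocations:
 * sparse_mem_maps_populate_node() may reserve one large bootmem block
 * up front, and alloc_block_buf() then carves the individual section
 * memmaps out of it instead of doing a separate bootmem allocation per
 * section.
 */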
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; else fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

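		/*
		 * __GFP_RETRY_MAYFAIL: retry reclaim/compaction aggressively,
		 * but return NULL rather than invoke the OOM killer if the
		 * allocation still cannot be satisfied.
		 */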
		page = alloc_pages_node(node,
			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
			get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/*
 * During the early stage all callers must request the same size, so
 * that the scratch buffer (sized and aligned for a single block size)
 * is never fragmented by the ALIGN() below.
 */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

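/*
 * A struct vmem_altmap (see linux/memremap.h) describes a pool of pages,
 * typically reserved out of a ZONE_DEVICE range, from which the memmap
 * can be allocated instead of from regular memory: @base_pfn is the
 * start of the pool, @reserve pages at its head are never handed out,
 * @free is the pool size, and @alloc/@align count pages consumed so far.
 * The next free pfn is thus a simple running cursor:
 */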
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
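
/*
 * Worked example of the alignment above (hypothetical values): for
 * nr_pfns = 8 with the cursor at pfn 0x1006, find_first_bit() picks
 * bit 3, so the request is aligned to 8 pages: ALIGN(0x1006, 8) =
 * 0x1008.  The two skipped pages are charged to @align and the eight
 * allocated pages to @alloc, keeping vmem_altmap_next_pfn() consistent.
 */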

static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/*
 * All early-stage callers must request the same size; see the comment
 * above alloc_block_buf().
 */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}

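/*
 * Warn when the memory that ends up backing the struct pages for
 * [start, end) lives on a node that is not local to @node: every access
 * to those page structs would then pay remote NUMA latency.
 */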
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

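/*
 * The vmemmap_*_populate() helpers below each make sure one level of
 * the kernel page table covers @addr, allocating a page for the missing
 * entry when needed.  Note that only the leaf (pte) level draws from
 * the early-boot scratch buffer via alloc_block_buf(); the intermediate
 * table pages always come from vmemmap_alloc_block().
 */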
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

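/*
 * Populate [start, end) of the vmemmap with base (PAGE_SIZE) pages,
 * walking the page table top-down for every address and filling in any
 * missing levels.  An architecture whose vmemmap_populate() does not
 * map the memmap with huge pages can implement it directly with this
 * helper.
 */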
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

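/*
 * Back the memmap for section @pnum on node @nid.  The section's struct
 * page range already has a fixed virtual address inside the vmemmap
 * area, so all that is left is to populate that range.
 */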
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

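/*
 * Sizing note: each section's memmap is sizeof(struct page) *
 * PAGES_PER_SECTION bytes, rounded up to PMD_SIZE so the buffer can be
 * mapped with huge pages where the architecture supports it.  On
 * x86-64, for example, a 64-byte struct page and PAGES_PER_SECTION =
 * 1 << 15 make each section's memmap exactly 2MB, i.e. one PMD.
 */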
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the scratch buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}