xref: /openbmc/linux/arch/s390/mm/vmem.c (revision 1d27a0be)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

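/*
 * A memory segment describes a contiguous physical memory range that is
 * part of the 1:1 mapping. mem_segs holds the boot memory regions as well
 * as ranges added later via vmem_add_mapping(), serialized by vmem_mutex.
 */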
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

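/*
 * Allocate pages from the buddy allocator once slab is up, or from memblock
 * during early boot. The __ref annotation covers the early memblock path.
 */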
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

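/*
 * Allocate a region or segment table (CRST) and initialize all of its
 * entries to @val, typically the "empty" entry value of the next level.
 */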
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

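/*
 * Allocate a page table and mark all of its entries invalid. Uses the
 * page table allocator of init_mm once slab is up, memblock otherwise.
 */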
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
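		/*
		 * Map a 2 GB chunk with a single region-third-table entry if
		 * the machine has EDAT2, the chunk is aligned, fully covered,
		 * not at address zero, and debug_pagealloc is disabled.
		 */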
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
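		/*
		 * Likewise, map a 1 MB chunk with a single segment-table
		 * entry if the machine has EDAT1 and the same alignment and
		 * coverage conditions hold.
		 */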
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
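		/* 2 GB mapping: clear the region-third-table entry. */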
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
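		/* 1 MB mapping: clear the segment-table entry. */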
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1 MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would also end up with page
			 * tables, since vmemmap_populate() gets called for
			 * each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
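			/* Already covered by a large frame, skip it. */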
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

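/*
 * The backing store of the virtual memmap is not freed here.
 */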
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

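	/* Reject segments beyond VMEM_MAX_PHYS or ones that wrap around. */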
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

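/*
 * Drop a segment from the list and tear down its 1:1 mapping.
 */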
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

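/*
 * Remove a mapping previously established with vmem_add_mapping(); returns
 * -ENOENT unless a segment matches start and size exactly (used e.g. by the
 * DCSS segment support).
 */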
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

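/*
 * Register a new memory segment and add it to the 1:1 mapping. Returns
 * -ERANGE or -ENOSPC if the range is out of bounds or overlaps an existing
 * segment, -ENOMEM if allocations fail.
 */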
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
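	/*
	 * Write protect the kernel image: text, init text and .dma text are
	 * set read-only and executable, read-only data is set read-only.
	 */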
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);