// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)(x)))

static pte_t pte_z;

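/*
 * Populate the shadow of the [start, end) virtual address range: both
 * addresses are translated to their KASAN shadow and expanded to page
 * boundaries, since one shadow page covers eight mapped pages.
 */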
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

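/*
 * Set up the KASAN shadow for the entire kernel address space: map fresh
 * shadow pages for usable physical memory and let everything else share
 * the read-only early shadow (zero) page and page tables.
 */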
static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long memgap_start = 0;
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   |    kasan	|
	 * |		     |	  /    |  zero page	|
	 * +- vmalloc area  -+	 /     |   mapping	|
	 * | vmalloc_size    |	/      | (untracked)	|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |    unmapped	| allocated per module
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+	       +- shadow start -+
	 * |1:1 ident mapping|	      /|1/8 of ident map|
	 * |		     |	     / |		|
	 * +-end of ident map+	    /  +----------------+
	 * | ... gap ...     |	   /   | kasan zero page| (untracked)
	 * |		     |	  /    | mapping	|
	 * +- vmalloc area  -+	 /     +----------------+
	 * | vmalloc_size    |	/      |shallow populate|
	 * +- modules vaddr -+ /       +----------------+
	 * | 2Gb	     |/        |shallow populate|
	 * +- shadow start  -+	       +----------------+
	 * | 1/8 addr space  |	       | zero pg mapping| (untracked)
	 * +- shadow end ----+---------+- shadow end ---+
	 */

	for_each_physmem_usable_range(i, &start, &end) {
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
		if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
			kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
		memgap_start = end;
	}
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

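/*
 * The kasan_*_populate_zero_shadow() helpers below short-circuit shadow
 * population: if a whole, suitably aligned entry falls into
 * POPULATE_KASAN_ZERO_SHADOW, the entry is pointed at the shared early
 * shadow tables instead of allocating new ones. They return true if the
 * entry has been handled this way.
 */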
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte(), since the init_mm symbol is not available here.
 * Skip the pmd NULL check though.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

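/*
 * Allocate a naturally aligned crst (region/segment) table, which on
 * s390 spans four pages, and initialize all of its entries with @val.
 */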
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}

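/*
 * Allocate a 2K page table. Since s390 page tables are half a page, a
 * full page is allocated once and handed out as two page tables; the
 * second half is stashed in pte_leftover for the next call.
 */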
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * Handling pte_leftover this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

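/*
 * Return the physical address that shall back @addr for the given
 * populate mode, or -1 when there is no backing (POPULATE_NONE and
 * unknown modes). For POPULATE_KASAN_MAP_SHADOW a fresh zeroed range
 * of @size bytes is allocated.
 */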
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

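/*
 * Large mappings require the corresponding facility: EDAT-1 for 1MB pmd
 * (segment) mappings, EDAT-2 for 2GB pud (region third) mappings. The
 * range must also be naturally aligned and span the whole entry.
 */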
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}

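/*
 * Populate the pte level for [addr, end): install a 4K kernel mapping
 * into every empty pte, unless the KASAN zero shadow covers it.
 */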
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

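/*
 * Populate the pmd level for [addr, end): use 1MB segment mappings
 * where hardware and alignment allow it, otherwise allocate a page
 * table and descend to the pte level.
 */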
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

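/*
 * Populate the pud level for [addr, end): use 2GB region third mappings
 * where hardware and alignment allow it, otherwise allocate a segment
 * table and descend to the pmd level.
 */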
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

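/*
 * Populate the p4d level for [addr, end): allocate region third tables
 * as needed and descend, unless the KASAN zero shadow covers a whole
 * p4d entry.
 */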
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

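/*
 * Populate the page table hierarchy for [addr, end), starting at the
 * pgd level. For POPULATE_KASAN_SHALLOW only the top level tables are
 * created; the lower shadow levels for vmalloc are populated on demand
 * later.
 */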
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

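/*
 * Create the initial kernel page tables in the decompressor: identity
 * map all usable physical memory, map the absolute lowcore and the
 * memcpy_real area, set up the KASAN shadow and load the resulting
 * ASCEs into control registers 1, 7 and 13.
 */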
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing, the lowcore must be mapped with 4KB pages. To
	 * prevent creation of a large page at address 0, first map the
	 * lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}
459