xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision 7a6ee0bb)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right below the kernel mapping.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower levels (pud/p4d)
 *
 * In addition, when shallow populating a KASAN region (for example vmalloc),
 * this region may not be aligned on PGDIR_SIZE either, so we must go down to
 * the pud level too.
 */

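/*
 * Reminder on the shadow layout this file populates: with generic KASAN,
 * shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
 * i.e. one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes of memory.
 * The BUILD_BUG_ON() in kasan_early_init() checks that KASAN_SHADOW_OFFSET is
 * consistent with KASAN_SHADOW_END under this formula.
 */
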
extern pgd_t early_pg_dir[PTRS_PER_PGD];

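/*
 * Populate the PTE level for [vaddr, end): reuse the PTE table the PMD entry
 * already points to or allocate a new one, back every still-empty entry with
 * a freshly allocated shadow page, then install the table in the PMD entry.
 */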
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

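/*
 * Populate the PMD level for [vaddr, end): a new PMD table is allocated when
 * the PUD entry is empty or still points to the shared early shadow PMD.
 * Empty entries covering a whole, aligned PMD_SIZE range are mapped with huge
 * pages when memblock can provide a contiguous PMD_SIZE block; everything
 * else falls back to kasan_populate_pte().
 */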
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting it in
	 * the PUD entry, otherwise, if we did set it before populating it
	 * entirely, memblock could allocate a page at a physical address
	 * whose shadow is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

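/*
 * Populate the PUD level for [vaddr, end). In the early pass the PUD table is
 * reached through pt_ops and whole PUD_SIZE ranges are simply pointed at the
 * early shadow PMD; in the final pass a private copy of the early shadow PUD
 * is used and whole ranges are mapped with PUD-sized huge pages when memblock
 * can provide them, with kasan_populate_pmd() handling the rest.
 */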
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr() here as it would return a
		 * linear mapping address, but the linear mapping is not set
		 * up yet: when populating early_pg_dir we need the physical
		 * address, and when populating swapper_pg_dir we need the
		 * kernel virtual address, so use the pt_ops facility instead.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before setting the PGD
	 * entry, otherwise, if we did set it before populating the table
	 * entirely, memblock could allocate a page at a physical address
	 * whose shadow is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

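/*
 * Helpers to hide the number of page table levels: with pgtable_l4_enabled
 * (sv48), the PGD entries point to PUD tables, otherwise (sv39) they point
 * directly to PMD tables, so pick the matching early shadow table and
 * population routine.
 */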
#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd)
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))

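/*
 * Walk the PGD entries covering [vaddr, end): whole PGDIR_SIZE ranges are
 * pointed at the early shadow tables during the early passes, or, if they
 * still carry the early shadow mapping, replaced by a PGDIR_SIZE huge mapping
 * when memblock can provide one; anything else is handed down to the next
 * level via kasan_populate_pgd_next().
 */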
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init()
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pud: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

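/*
 * Early shadow setup: wire up the shared early shadow tables (every PTE maps
 * the zero shadow page, every PMD/PUD entry points at the level below), then
 * map the whole KASAN shadow region in early_pg_dir through those tables so
 * instrumented code can run long before kasan_init() populates the real
 * shadow memory.
 */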
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

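/*
 * Install the same early shadow mapping into swapper_pg_dir; the real shadow
 * memory is populated later by kasan_init().
 */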
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

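/*
 * Fully populate the shadow for [start, end) with real memory and initialize
 * it to KASAN_SHADOW_INIT.
 */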
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

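/*
 * Shallow PUD-level populate: when the PGD entry has just been switched away
 * from the early shadow table, first copy the early shadow entries into the
 * new PUD table, then give every PUD entry that still points to the early
 * shadow PMD its own zeroed PMD table. No shadow pages are mapped here.
 */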
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end,
					      bool kasan_populate)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	if (kasan_populate)
		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
		       sizeof(pud_t) * PTRS_PER_PUD);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (is_kasan_pmd) {
			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
		}
	} while (pudp++, vaddr = next, vaddr != end);
}

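/*
 * Shallow PGD-level populate for [vaddr, end): PGD entries still pointing at
 * the early shadow tables get their own zeroed table, and entries that are
 * only partially covered by the range are further split at the PUD level.
 */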
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

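/*
 * Build page tables for the shadow of a region (here the vmalloc area)
 * without backing them with shadow pages: with CONFIG_KASAN_VMALLOC the
 * shadow is populated on demand when vmalloc maps something there.
 */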
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

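/*
 * Main KASAN setup: shallow-map the vmalloc shadow, fully populate the shadow
 * of the linear mapping and of the kernel/modules/BPF region, then make the
 * early shadow page read-only and zeroed again and enable reporting by
 * clearing init_task.kasan_depth.
 */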
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}