xref: /openbmc/linux/arch/riscv/mm/kasan_init.c (revision 8e74a48d)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right before the kernel mapping.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping, so it is populated
 *   at the lower pud/p4d levels
 *
 * In addition, when shallow populating a KASAN region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to the
 * pud level too.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];

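/*
 * Populate the shadow PTEs covering [vaddr, end): reuse the PTE table already
 * installed in @pmd if there is one, otherwise allocate a fresh one from
 * memblock, then back every still-empty entry with a newly allocated shadow
 * page. The PMD entry is (re)written last.
 */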
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

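/*
 * Populate the shadow PMDs covering [vaddr, end): allocate a new PMD table if
 * @pud is empty or still points at the early shadow PMD, map PMD_SIZE-aligned
 * chunks with huge pages when memblock can provide them, and fall back to
 * kasan_populate_pte() otherwise.
 */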
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before installing it in
	 * the PUD entry: if we set the PUD entry before populating the PMD
	 * entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

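/*
 * Populate the shadow PUDs covering [vaddr, end). In the early case the PUD
 * table is reached through pt_ops and PUD_SIZE-aligned chunks are simply
 * pointed at kasan_early_shadow_pmd; otherwise huge pages are tried first and
 * kasan_populate_pmd() handles the rest.
 */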
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address, but the linear mapping is not mapped yet:
		 * when populating early_pg_dir we need the physical address,
		 * and when populating swapper_pg_dir we need the kernel
		 * virtual address, so use the pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud))
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before installing it in
	 * the PGD entry: if we set the PGD entry before populating the PUD
	 * entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

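/*
 * Dispatch to the right level below the PGD: with 4-level page tables the PGD
 * entries point at PUD tables, with 3-level (sv39) page tables they point
 * directly at PMD tables.
 */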
#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd)
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))

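/*
 * Walk the PGD entries covering [vaddr, end). Aligned, fully covered entries
 * are pointed at the early shadow tables (early case) or backed by a
 * PGDIR_SIZE allocation when memblock can satisfy it; everything else is
 * populated at the lower levels.
 */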
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pud: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

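/*
 * Set up the early shadow mapping: every entry of the early shadow PTE/PMD/PUD
 * tables points at the zero kasan_early_shadow_page, and the whole shadow
 * region in early_pg_dir is populated with those tables so that all shadow
 * accesses are valid until the real mapping is built.
 */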
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

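/*
 * Populate the shadow region in swapper_pg_dir with the early shadow tables,
 * mirroring what kasan_early_init() did for early_pg_dir.
 */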
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

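/*
 * Fully populate the shadow for [start, end) with real pages and clear it to
 * KASAN_SHADOW_INIT.
 */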
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

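/*
 * Shallow-populate the PUD entries covering [vaddr, end): any entry still
 * pointing at the early shadow PMD gets its own zeroed PMD table, leaving the
 * lower levels unmapped (used for shallow regions such as the vmalloc shadow).
 */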
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end,
					      bool kasan_populate)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	if (kasan_populate)
		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
		       sizeof(pud_t) * PTRS_PER_PUD);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (is_kasan_pmd) {
			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
		}
	} while (pudp++, vaddr = next, vaddr != end);
}

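/*
 * Shallow-populate the PGD entries covering [vaddr, end): entries still shared
 * with the early shadow tables get their own zeroed table, and ranges that do
 * not span a whole PGD entry are handled one level down by
 * kasan_shallow_populate_pud().
 */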
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

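/*
 * Shallow-populate the shadow for [start, end): only the upper levels of the
 * page tables are allocated, the leaf shadow pages are left unmapped.
 */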
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

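/*
 * Build the final shadow mapping: shallow shadow for vmalloc when
 * CONFIG_KASAN_VMALLOC is enabled, fully populated shadow for the linear
 * mapping and for the kernel/BPF/modules region, then make the early shadow
 * page mapping read-only and enable reporting by clearing
 * init_task.kasan_depth.
 */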
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}