// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

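/*
 * Temporary pgd: kasan_init() copies swapper_pg_dir in here so that the
 * early shadow stays mapped while the real shadow tables are installed.
 */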
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (e.g. the kasan_early_shadow_p*d tables). All the
 * early functions are called too early to use lm_alias, so the __p*d_populate
 * functions must be used instead, with physical addresses from __pa_symbol.
 */

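/*
 * Allocate one zeroed page of shadow memory from memblock, above
 * MAX_DMA_ADDRESS and preferably on @node. Failure is fatal: KASAN
 * cannot make progress without shadow memory.
 */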
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

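/*
 * Like kasan_alloc_zeroed_page(), but the page is returned uninitialized;
 * the caller is expected to fill it (see kasan_pte_populate()).
 */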
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
						__pa(MAX_DMA_ADDRESS),
						MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

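/*
 * The kasan_p*d_offset() helpers walk one level of the shadow page tables
 * for @addr. If the entry is empty it is populated first: in early mode
 * with the statically allocated kasan_early_shadow_* table, later with a
 * freshly allocated zeroed page.
 */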
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}

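/*
 * The kasan_p*d_populate() walkers map shadow for [addr, end). In early
 * mode every pte is pointed at the shared kasan_early_shadow_page;
 * otherwise each pte gets its own page, filled with KASAN_SHADOW_INIT.
 */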
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_pud_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

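/*
 * Shadow addresses are computed as
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 * (see kasan_mem_to_shadow()), which is what the first BUILD_BUG_ON
 * below checks against KASAN_SHADOW_END.
 */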
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
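	/*
	 * Compile-time layout checks: the shadow region must start and end
	 * on PGDIR boundaries, for both VA_BITS and the minimum VA_BITS_MIN.
	 */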
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory. First the
	 * early shadow must be unmapped (the clear_pgds() call below), but
	 * instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

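	/*
	 * Ranges that never get dedicated shadow (between the linear map,
	 * module and kernel-image shadows) are backed by the shared early
	 * shadow page instead.
	 */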
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				   (void *)KASAN_SHADOW_END);

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

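	/* Map real, node-local shadow for every block of present memory. */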
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

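	/*
	 * Fill the shared shadow page with the default unpoisoned value,
	 * then switch back to swapper_pg_dir, which now holds the real
	 * shadow mappings.
	 */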
	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}