/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

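/*
 * Temporary pgdir used by kasan_init() to keep the early shadow mapped
 * while the real shadow mappings are plumbed into swapper_pg_dir.
 */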
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (bm_p*d). All the early functions are
 * called too early to use lm_alias, so the __p*d_populate functions must be
 * used instead, populating with the physical address taken from __pa_symbol.
 */

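/* Allocate a zeroed page from memblock and return its physical address. */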
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

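/*
 * As above, but without zeroing: the caller initializes the page itself
 * (see kasan_pte_populate(), which memsets it to KASAN_SHADOW_INIT).
 */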
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
						__pa(MAX_DMA_ADDRESS),
						MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

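/*
 * Return the pte for addr, first installing a new pte table in *pmdp if
 * none is present. Early calls install the statically allocated
 * kasan_early_shadow_pte via its __pa_symbol address, as explained above.
 */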
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

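/* As above, one level up: ensure a pmd table and return the pmd for addr. */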
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

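/* As above: ensure a pud table and return the pud for addr. */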
static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}

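/*
 * Populate shadow ptes for [addr, end). Early mappings all point at the
 * shared kasan_early_shadow_page; late mappings get freshly allocated
 * pages initialized to KASAN_SHADOW_INIT.
 */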
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

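/*
 * The pmd, pud and pgd levels below walk their portion of the range one
 * entry at a time and hand each sub-range to the next level down, stopping
 * early if the next entry is already populated.
 */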
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_pud_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here
	 * because it's a nop on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

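	/*
	 * The portions of the shadow region not covered above are backed
	 * by the single early shadow page.
	 */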
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

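	/* Create a proper shadow for every region of the linear map. */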
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

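	/*
	 * The early shadow page may have been written to while it was
	 * mapped writable, so reset it to KASAN_SHADOW_INIT before
	 * switching back to swapper_pg_dir.
	 */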
	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}