// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

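/*
 * Temporary, page-aligned PGD: kasan_init() switches LOONGARCH_CSR_PGDH to
 * a copy of swapper_pg_dir held here while the shadow entries in
 * swapper_pg_dir itself are cleared and repopulated, then switches back.
 */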
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

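/*
 * An entry is "none" in the early stage if it is empty; once real shadow
 * pages are being installed, it is "none" if it still points at the shared
 * kasan_early_shadow_* table of the level below.
 */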
#ifdef __PAGETABLE_P4D_FOLDED
#define __pgd_none(early, pgd)	(0)
#else
#define __pgd_none(early, pgd)	(early ? (pgd_val(pgd) == 0) : \
	(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d)	(0)
#else
#define __p4d_none(early, p4d)	(early ? (p4d_val(p4d) == 0) : \
	(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud)	(0)
#else
#define __pud_none(early, pud)	(early ? (pud_val(pud) == 0) : \
	(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd)	(early ? (pmd_val(pmd) == 0) : \
	(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte)	(early ? pte_none(pte) : \
	((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

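/*
 * Cleared in kasan_init() once the real shadow mappings exist; consulted
 * via kasan_arch_is_ready() in kasan_mem_to_shadow() below.
 */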
bool kasan_early_stage = true;

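/*
 * LoongArch has no single linear shadow mapping: XKPRANGE (cached and
 * uncached) and XKVRANGE addresses are translated with per-range shadow
 * offsets, while anything else (e.g. the fixmap) falls back to the early
 * shadow page, whose contents always read as zero.
 */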
void *kasan_mem_to_shadow(const void *addr)
{
	if (!kasan_arch_is_ready()) {
		return (void *)(kasan_early_shadow_page);
	} else {
		unsigned long maddr = (unsigned long)addr;
		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
		unsigned long offset = 0;

		if (maddr >= FIXADDR_START)
			return (void *)(kasan_early_shadow_page);

		maddr &= XRANGE_SHADOW_MASK;
		switch (xrange) {
		case XKPRANGE_CC_SEG:
			offset = XKPRANGE_CC_SHADOW_OFFSET;
			break;
		case XKPRANGE_UC_SEG:
			offset = XKPRANGE_UC_SHADOW_OFFSET;
			break;
		case XKVRANGE_VC_SEG:
			offset = XKVRANGE_VC_SHADOW_OFFSET;
			break;
		default:
			WARN_ON(1);
			return NULL;
		}

		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
	}
}

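/*
 * Inverse of kasan_mem_to_shadow(): pick the shadow sub-region by comparing
 * against the per-range offsets from highest to lowest, then undo the
 * scale-and-offset translation.
 */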
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
		unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}

/*
 * Allocate memory for the shadow memory page table.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
			__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

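/*
 * The kasan_*_offset() helpers below install a missing entry before
 * descending a level: in the early stage they link in the shared
 * kasan_early_shadow_* tables; later they allocate a fresh page and seed it
 * from the early table, so untouched slots keep pointing at the zero shadow.
 */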
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
	if (__pmd_none(early, READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
	if (__pud_none(early, READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
		pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
	}

	return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
	if (__p4d_none(early, READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
		p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
	}

	return pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
{
	if (__pgd_none(early, pgdp_get(pgdp))) {
		phys_addr_t p4d_phys = early ?
				__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
		pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
	}

	return p4d_offset(pgdp, addr);
}

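/*
 * The kasan_*_populate() walkers mirror a generic page-table walk, with one
 * twist in the loop condition: the walk stops early as soon as the next
 * entry is no longer "none", so ranges already backed by real shadow pages
 * are not mapped again.
 */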
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page) :
					kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

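/*
 * Nothing needs mapping this early; just assert at build time that the
 * shadow region is PGD-aligned at both ends, since clear_pgds() and the
 * populate walkers operate on whole PGD entries.
 */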
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}

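/* Direct PGD store; used by clear_pgds(), where pgd_clear() would be a nop */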
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from swapper_pg_dir.
	 * pgd_clear() can't be used here because it's a nop on 2- and
	 * 3-level page-table setups.
	 */
	for (; start < end; start = pgd_addr_end(start, end))
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
	 * overflow UINTPTR_MAX and then look like a user-space address.
	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
	 * large for the Loongson-2K series, whose cpu_vabits = 39.
	 */
	if (KASAN_SHADOW_END < vm_map_base) {
		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
		return;
	}

	/*
	 * The PGD entries were populated with invalid_pmd_table or
	 * invalid_pud_table in pagetable_init(), depending on how many
	 * page-table levels are in use. The PGD entries covering the KASAN
	 * shadow must therefore be cleared here: their values are non-zero,
	 * so pgd_none() would be false and the populate pass below would
	 * never install new PGD entries for them.
	 */
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

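	/* Map the shadow of the vmalloc/KFENCE range to the shared zero page */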
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
				    kasan_mem_to_shadow((void *)KFENCE_AREA_END));

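	/*
	 * Switch kasan_mem_to_shadow() to the real translation before the
	 * linear and module mappings below are populated.
	 */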
	kasan_early_stage = false;

	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
	}

	/* Populate modules mapping */
	kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
			   (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized.\n");
}