xref: /openbmc/linux/arch/x86/mm/kasan_init_64.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

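/*
 * One shadow byte tracks an 8-byte granule of memory, so
 * kasan_mem_to_shadow(addr) is essentially
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * Populate real shadow pages for one range of mapped physical
 * memory via vmemmap_populate().
 */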
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	return vmemmap_populate(start, end, NUMA_NO_NODE);
}

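/*
 * Unmap the early shadow: clear the top-level entries covering
 * [start, end) so the region can be repopulated with real shadow.
 */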
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With a folded p4d level, pgd_clear() is a no-op;
		 * use p4d_clear() instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}

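/*
 * Point every top-level entry of the shadow region at the shared
 * zero shadow tables (kasan_zero_pud or kasan_zero_p4d, depending
 * on the number of paging levels).
 */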
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}

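/*
 * With inline instrumentation, a bogus shadow access shows up as a
 * general protection fault; hint at the likely cause on die().
 */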
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

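/*
 * Build the early zero shadow: every entry at each page-table level
 * points at the zero table one level down, bottoming out at a single
 * page (kasan_zero_page) that backs the entire shadow region.
 */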
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

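/*
 * Replace the early zero shadow with the final layout: real shadow
 * pages for the physmap and the kernel image, zero shadow for
 * everything in between.
 */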
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

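	/*
	 * Work on a copy of the top-level table: sync early_top_pgt
	 * from init_top_pgt and run on it while the shadow mappings
	 * in init_top_pgt are torn down and rebuilt.
	 */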
	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

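	/*
	 * Addresses below PAGE_OFFSET get the zero shadow, so that
	 * checks on stray pointers read zeroes instead of faulting.
	 */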
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

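	/* Allocate real shadow pages for every mapped physical range. */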
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
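	/*
	 * Everything between the physmap and the kernel mapping
	 * (vmalloc, vmemmap, ...) keeps the zero shadow.
	 */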
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

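	/* Real shadow for the kernel image itself, _stext.._end. */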
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

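	/* Shadow for everything above MODULES_END is the zero shadow. */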
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

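	/* The final shadow is in place; switch back to init_top_pgt. */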
	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear and write-protect it,
	 * since after the TLB flush nobody should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to make sure the write protection is applied. */
	__flush_tlb_all();

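	/*
	 * kasan_depth is nonzero during early boot to suppress reports;
	 * clearing it arms KASAN reporting from here on.
	 */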
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}