// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

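/*
 * map_range() below fills in real shadow memory for one chunk of the
 * direct mapping. For reference, kasan_mem_to_shadow() implements the
 * generic KASAN translation
 *
 *	shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *		 + KASAN_SHADOW_OFFSET;
 *
 * A worked example, assuming the x86_64 defaults of a scale shift of 3
 * and a shadow offset of 0xdffffc0000000000, with KASLR disabled: each
 * shadow byte tracks an 8-byte granule, so the granule at
 * 0xffff880000000000 (the 4-level PAGE_OFFSET) is described by the
 * shadow byte at 0xffffed0000000000.
 */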
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	return vmemmap_populate(start, end, NUMA_NO_NODE);
}

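/*
 * clear_pgds() tears the early zero shadow out of the kernel page
 * tables before the real shadow is populated. The tail of the range is
 * cleared one p4d entry at a time because, as the comment in
 * kasan_init() explains, KASAN_SHADOW_END is not PGD-aligned under
 * 5-level paging and the last PGD entry is shared with other kernel
 * mappings that must survive.
 */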
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a no-op, so use
		 * p4d_clear() instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

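/*
 * early_p4d_offset() stands in for p4d_offset() before the direct
 * mapping is up: with 4-level paging the p4d level is folded into the
 * pgd, and with 5-level paging the p4d table's physical address is
 * read straight out of the pgd entry and converted to a virtual
 * address through the kernel text mapping (__START_KERNEL_map -
 * phys_base), since __va() is not usable this early.
 */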
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

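/*
 * Point every still-empty entry in [addr, end) at the shared zero
 * shadow tables. Until kasan_init() builds the real shadow, every
 * shadow lookup thus reads zero, i.e. all memory appears valid to the
 * instrumentation.
 */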
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

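/*
 * With CONFIG_KASAN_INLINE the compiler emits the shadow check inline
 * instead of calling into the runtime, so a sufficiently wild pointer
 * can yield a shadow address outside the mapped shadow region and the
 * access dies with a general protection fault instead of a KASAN
 * report. The notifier below turns such a GPF into a hint about the
 * likely cause.
 */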
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

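/*
 * Build the cascade of zero tables used as the early shadow: every
 * kasan_zero_pte entry points at kasan_zero_page, every kasan_zero_pmd
 * entry at kasan_zero_pte, and so on up the levels, so any early
 * shadow lookup resolves to the same single page (still writable at
 * this point, so it may pick up stray writes; see the cleanup at the
 * end of kasan_init()). A sketch of the resulting walk, assuming
 * 4-level paging (p4d folded):
 *
 *	pgd -> kasan_zero_pud -> kasan_zero_pmd -> kasan_zero_pte
 *						-> kasan_zero_page
 */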
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

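/*
 * kasan_init() replaces the early zero shadow with the final layout:
 * real, writable shadow for the ranges that need tracking (the direct
 * mapping and the kernel image) and read-only zero shadow for the
 * holes in between. Shadow for the modules area is allocated later,
 * at module load time.
 */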
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
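
	/*
	 * early_top_pgt now carries a full copy of the kernel mappings,
	 * early zero shadow included. The CPU is switched onto it below
	 * so that the shadow entries in init_top_pgt (i.e.
	 * swapper_pg_dir, which pgd_offset_k() walks) can be torn down
	 * and rebuilt while the early shadow stays usable.
	 */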

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
	 * a bunch of things such as kernel code, modules and the EFI
	 * mapping. We need to take extra steps to not overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}
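
	/*
	 * Without the copy above, early_top_pgt and init_top_pgt would
	 * share the p4d page behind the last PGD entry, and clear_pgds()
	 * below would rip the early zero shadow out from under the page
	 * tables the CPU is about to run on. Giving early_top_pgt its
	 * own copy (tmp_p4d_table) keeps the early shadow alive until
	 * the final switch back to init_top_pgt.
	 */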

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}