xref: /openbmc/linux/arch/x86/mm/kasan_init_64.c (revision 12a8cc7f)
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

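/*
 * With 5-level paging, KASAN_SHADOW_END shares its top-level (PGD) entry
 * with other kernel mappings.  This temporary p4d table holds a copy of
 * that entry's contents while the shadow is rebuilt (see kasan_init()).
 */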
static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

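/*
 * Populate real shadow memory for one range of the direct mapping (taken
 * from pfn_mapped[]).  vmemmap_populate() backs the shadow with actual
 * pages, unlike the shared zero shadow used elsewhere.
 */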
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	return vmemmap_populate(start, end, NUMA_NO_NODE);
}

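/*
 * Unmap the early zero shadow installed by kasan_map_early_shadow().
 * The bulk of the region is cleared one PGD entry at a time; the tail
 * (KASAN_SHADOW_END is not PGD-aligned with 5-level paging) is cleared
 * at P4D granularity so the rest of the last PGD entry is preserved.
 */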
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, so use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

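/*
 * Early variant of p4d_offset().  At this point the direct mapping is not
 * set up yet, so the p4d table is reached through the kernel text mapping
 * (__START_KERNEL_map - phys_base) instead of __va().  With 4-level paging
 * the p4d level is folded and the pgd entry itself acts as the p4d entry.
 */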
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

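/*
 * Point the early shadow for [addr, end) at the zero shadow: the pgd entry
 * is directed at kasan_zero_p4d (5-level paging only) and every empty p4d
 * entry at kasan_zero_pud, so the whole shadow initially reads as zero.
 */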
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

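/*
 * Wire the early zero shadow for the whole shadow region into the given
 * top-level page table.  Real shadow memory is allocated later, in
 * kasan_init().
 */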
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
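/*
 * With inline instrumentation, a shadow check on a wild pointer (NULL
 * dereference, user memory access) faults before KASAN can print a report,
 * so print a hint when such a general protection fault is seen.
 */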
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

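/*
 * Build the early zero shadow: every pte points at kasan_zero_page, every
 * pmd at kasan_zero_pte, every pud at kasan_zero_pmd (and, with 5-level
 * paging, every p4d at kasan_zero_pud), then hook the shadow into both the
 * early and the final kernel top-level page tables.
 */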
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

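/*
 * Replace the early zero shadow with the final shadow layout: unmap the
 * early shadow, allocate real shadow memory for the direct mapping and for
 * the kernel image, back the remaining gaps with the zero shadow, then
 * write-protect kasan_zero_page and enable error reporting.
 */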
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to a PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else,
	 * so we claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry, where it collides
	 * with a bunch of things such as kernel code, modules, the EFI
	 * mapping, etc. We need to take extra steps not to overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

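	/* Allocate real shadow for every mapped physical memory range. */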
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear it and write-protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}