xref: /openbmc/linux/arch/xtensa/mm/kasan_init.c (revision 8a7f97b902f4fb0d94b355b6b3f1fbd7154cafb9)
1c633544aSMax Filippov /*
2c633544aSMax Filippov  * Xtensa KASAN shadow map initialization
3c633544aSMax Filippov  *
4c633544aSMax Filippov  * This file is subject to the terms and conditions of the GNU General Public
5c633544aSMax Filippov  * License.  See the file "COPYING" in the main directory of this archive
6c633544aSMax Filippov  * for more details.
7c633544aSMax Filippov  *
8c633544aSMax Filippov  * Copyright (C) 2017 Cadence Design Systems Inc.
9c633544aSMax Filippov  */
10c633544aSMax Filippov 
1157c8a661SMike Rapoport #include <linux/memblock.h>
12c633544aSMax Filippov #include <linux/init_task.h>
13c633544aSMax Filippov #include <linux/kasan.h>
14c633544aSMax Filippov #include <linux/kernel.h>
15c633544aSMax Filippov #include <asm/initialize_mmu.h>
16c633544aSMax Filippov #include <asm/tlbflush.h>
17c633544aSMax Filippov #include <asm/traps.h>
18c633544aSMax Filippov 
19c633544aSMax Filippov void __init kasan_early_init(void)
20c633544aSMax Filippov {
21c633544aSMax Filippov 	unsigned long vaddr = KASAN_SHADOW_START;
22c633544aSMax Filippov 	pgd_t *pgd = pgd_offset_k(vaddr);
23c633544aSMax Filippov 	pmd_t *pmd = pmd_offset(pgd, vaddr);
24c633544aSMax Filippov 	int i;
25c633544aSMax Filippov 
26c633544aSMax Filippov 	for (i = 0; i < PTRS_PER_PTE; ++i)
279577dd74SAndrey Konovalov 		set_pte(kasan_early_shadow_pte + i,
289577dd74SAndrey Konovalov 			mk_pte(virt_to_page(kasan_early_shadow_page),
299577dd74SAndrey Konovalov 				PAGE_KERNEL));
30c633544aSMax Filippov 
31c633544aSMax Filippov 	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
32c633544aSMax Filippov 		BUG_ON(!pmd_none(*pmd));
339577dd74SAndrey Konovalov 		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
34c633544aSMax Filippov 	}
35c633544aSMax Filippov 	early_trap_init();
36c633544aSMax Filippov }
37c633544aSMax Filippov 
/*
 * Replace the early shadow mapping of [start, end) with freshly
 * allocated, writable shadow pages and zero them.
 *
 * start/end are shadow-space addresses; the range is assumed to be
 * PMD-aligned (n_pages is taken to be a multiple of PTRS_PER_PTE).
 * Panics on allocation failure — this runs at early boot where
 * recovery is impossible.
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = pmd_offset(pgd, vaddr);
	/* One contiguous array of PTEs covering the whole range. */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Back every shadow page with its own freshly allocated frame. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Point each PMD at its PTRS_PER_PTE-sized slice of the PTE array. */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/* Drop stale early-shadow translations before touching the range. */
	local_flush_tlb_all();
	memset(start, 0, end - start);
}
74c633544aSMax Filippov 
75c633544aSMax Filippov void __init kasan_init(void)
76c633544aSMax Filippov {
77c633544aSMax Filippov 	int i;
78c633544aSMax Filippov 
79c633544aSMax Filippov 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
80c633544aSMax Filippov 		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
81c633544aSMax Filippov 	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
82c633544aSMax Filippov 
83c633544aSMax Filippov 	/*
84c633544aSMax Filippov 	 * Replace shadow map pages that cover addresses from VMALLOC area
85c633544aSMax Filippov 	 * start to the end of KSEG with clean writable pages.
86c633544aSMax Filippov 	 */
87c633544aSMax Filippov 	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
88c633544aSMax Filippov 		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
89c633544aSMax Filippov 
909577dd74SAndrey Konovalov 	/*
919577dd74SAndrey Konovalov 	 * Write protect kasan_early_shadow_page and zero-initialize it again.
929577dd74SAndrey Konovalov 	 */
93c633544aSMax Filippov 	for (i = 0; i < PTRS_PER_PTE; ++i)
949577dd74SAndrey Konovalov 		set_pte(kasan_early_shadow_pte + i,
959577dd74SAndrey Konovalov 			mk_pte(virt_to_page(kasan_early_shadow_page),
969577dd74SAndrey Konovalov 				PAGE_KERNEL_RO));
97c633544aSMax Filippov 
98c633544aSMax Filippov 	local_flush_tlb_all();
999577dd74SAndrey Konovalov 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
100c633544aSMax Filippov 
101c633544aSMax Filippov 	/* At this point kasan is fully initialized. Enable error messages. */
102c633544aSMax Filippov 	current->kasan_depth = 0;
103c633544aSMax Filippov 	pr_info("KernelAddressSanitizer initialized\n");
104c633544aSMax Filippov }
105