/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */
10c633544aSMax Filippov
1157c8a661SMike Rapoport #include <linux/memblock.h>
12c633544aSMax Filippov #include <linux/init_task.h>
13c633544aSMax Filippov #include <linux/kasan.h>
14c633544aSMax Filippov #include <linux/kernel.h>
15c633544aSMax Filippov #include <asm/initialize_mmu.h>
16c633544aSMax Filippov #include <asm/tlbflush.h>
17c633544aSMax Filippov
kasan_early_init(void)18c633544aSMax Filippov void __init kasan_early_init(void)
19c633544aSMax Filippov {
20c633544aSMax Filippov unsigned long vaddr = KASAN_SHADOW_START;
21*e05c7b1fSMike Rapoport pmd_t *pmd = pmd_off_k(vaddr);
22c633544aSMax Filippov int i;
23c633544aSMax Filippov
24c633544aSMax Filippov for (i = 0; i < PTRS_PER_PTE; ++i)
259577dd74SAndrey Konovalov set_pte(kasan_early_shadow_pte + i,
269577dd74SAndrey Konovalov mk_pte(virt_to_page(kasan_early_shadow_page),
279577dd74SAndrey Konovalov PAGE_KERNEL));
28c633544aSMax Filippov
29c633544aSMax Filippov for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
30c633544aSMax Filippov BUG_ON(!pmd_none(*pmd));
319577dd74SAndrey Konovalov set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
32c633544aSMax Filippov }
33c633544aSMax Filippov }
34c633544aSMax Filippov
populate(void * start,void * end)35c633544aSMax Filippov static void __init populate(void *start, void *end)
36c633544aSMax Filippov {
37c633544aSMax Filippov unsigned long n_pages = (end - start) / PAGE_SIZE;
38c633544aSMax Filippov unsigned long n_pmds = n_pages / PTRS_PER_PTE;
39c633544aSMax Filippov unsigned long i, j;
40c633544aSMax Filippov unsigned long vaddr = (unsigned long)start;
41*e05c7b1fSMike Rapoport pmd_t *pmd = pmd_off_k(vaddr);
42eb31d559SMike Rapoport pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
43c633544aSMax Filippov
448a7f97b9SMike Rapoport if (!pte)
458a7f97b9SMike Rapoport panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
468a7f97b9SMike Rapoport __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
478a7f97b9SMike Rapoport
48c633544aSMax Filippov pr_debug("%s: %p - %p\n", __func__, start, end);
49c633544aSMax Filippov
50c633544aSMax Filippov for (i = j = 0; i < n_pmds; ++i) {
51c633544aSMax Filippov int k;
52c633544aSMax Filippov
53c633544aSMax Filippov for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
54c633544aSMax Filippov phys_addr_t phys =
55e64681b4SMax Filippov memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
56e64681b4SMax Filippov 0,
57e64681b4SMax Filippov MEMBLOCK_ALLOC_ANYWHERE);
58c633544aSMax Filippov
59ecc3e771SMike Rapoport if (!phys)
60ecc3e771SMike Rapoport panic("Failed to allocate page table page\n");
61ecc3e771SMike Rapoport
62c633544aSMax Filippov set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
63c633544aSMax Filippov }
64c633544aSMax Filippov }
65c633544aSMax Filippov
66c633544aSMax Filippov for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
67c633544aSMax Filippov set_pmd(pmd + i, __pmd((unsigned long)pte));
68c633544aSMax Filippov
69c633544aSMax Filippov local_flush_tlb_all();
70c633544aSMax Filippov memset(start, 0, end - start);
71c633544aSMax Filippov }
72c633544aSMax Filippov
kasan_init(void)73c633544aSMax Filippov void __init kasan_init(void)
74c633544aSMax Filippov {
75c633544aSMax Filippov int i;
76c633544aSMax Filippov
77c633544aSMax Filippov BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
78c633544aSMax Filippov (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
79c633544aSMax Filippov BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
80c633544aSMax Filippov
81c633544aSMax Filippov /*
82c633544aSMax Filippov * Replace shadow map pages that cover addresses from VMALLOC area
83c633544aSMax Filippov * start to the end of KSEG with clean writable pages.
84c633544aSMax Filippov */
85c633544aSMax Filippov populate(kasan_mem_to_shadow((void *)VMALLOC_START),
86c633544aSMax Filippov kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
87c633544aSMax Filippov
889577dd74SAndrey Konovalov /*
899577dd74SAndrey Konovalov * Write protect kasan_early_shadow_page and zero-initialize it again.
909577dd74SAndrey Konovalov */
91c633544aSMax Filippov for (i = 0; i < PTRS_PER_PTE; ++i)
929577dd74SAndrey Konovalov set_pte(kasan_early_shadow_pte + i,
939577dd74SAndrey Konovalov mk_pte(virt_to_page(kasan_early_shadow_page),
949577dd74SAndrey Konovalov PAGE_KERNEL_RO));
95c633544aSMax Filippov
96c633544aSMax Filippov local_flush_tlb_all();
979577dd74SAndrey Konovalov memset(kasan_early_shadow_page, 0, PAGE_SIZE);
98c633544aSMax Filippov
99c633544aSMax Filippov /* At this point kasan is fully initialized. Enable error messages. */
100c633544aSMax Filippov current->kasan_depth = 0;
101c633544aSMax Filippov pr_info("KernelAddressSanitizer initialized\n");
102c633544aSMax Filippov }
103