/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	int i;

	/*
	 * Point every entry of the early shadow PTE table at the single
	 * zeroed early shadow page.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* Map the whole shadow region through that shared PTE table. */
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}

	/* Now that a shadow map exists, early exception handling can be
	 * set up.
	 */
	early_trap_init();
}

static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/*
	 * Back every shadow page in the range with a freshly allocated
	 * physical page and record it in the PTE array.
	 */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Hook each newly filled PTE table into its PMD slot. */
	for (i = 0; i < n_pmds; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	local_flush_tlb_all();

	/* The new shadow pages are mapped writable now; start them out clean. */
	memset(start, 0, end - start);
}

void __init kasan_init(void)
{
	int i;

	/*
	 * Shadow addresses are computed as
	 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, so the
	 * shadow of KASAN_START_VADDR must land exactly at KASAN_SHADOW_START.
	 */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from the VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write-protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point KASAN is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}