/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

/* Temporary pgdir that keeps the early shadow mapped during kasan_init(). */
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		/* Point every early shadow page at the single zero page. */
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}
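
/*
 * For reference: kasan_mem_to_shadow() translates an address to its
 * shadow as
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3, i.e. one shadow byte per 8-byte
 * granule. The shadow of the full 64-bit address space is therefore
 * (1UL << 61) bytes, which is what the first BUILD_BUG_ON() above pins
 * down: KASAN_SHADOW_OFFSET must place the end of that span exactly at
 * KASAN_SHADOW_END.
 */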

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here because
	 * it is a no-op on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(tmp_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to
	 * prevent kasan_populate_zero_shadow() from replacing the page table
	 * entries (PMD or PTE) at the edges of the shadow region for the
	 * kernel image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up the fast path. In some rare
		 * cases we could cross the boundary of the mapped shadow, so
		 * we just map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	/*
	 * The early shadow was mapped read-write, so the zero page may have
	 * been dirtied by writes to the shadow during early boot; wipe it
	 * before the read-only mapping goes live.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
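
#if 0
/*
 * Illustrative sketch only, not used by this file: roughly how
 * compiler-instrumented code consults the shadow before an access of
 * 'size' bytes (size <= 8, and the access is assumed not to cross an
 * 8-byte granule boundary). The helper name is hypothetical. A shadow
 * byte of 0 means the whole granule is addressable, a positive value K
 * means only the first K bytes are, and a negative value marks
 * poisoned memory.
 */
static __always_inline bool kasan_access_ok_sketch(unsigned long addr,
						   size_t size)
{
	s8 shadow = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (shadow == 0)
		return true;	/* fully addressable granule */
	if (shadow < 0)
		return false;	/* poisoned (redzone, freed, ...) */
	/* Partially addressable: only the first 'shadow' bytes are valid. */
	return (addr & 7) + size <= (unsigned long)shadow;
}
#endif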