// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed, but are not handled by
 *     KASAN (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}
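
/*
 * The zero_<level>_populate() helpers below each walk one page-table
 * level: a sub-range that is aligned and large enough to be fully covered
 * at the current level is pointed at the shared early shadow table(s)
 * defined above, while everything else descends to the next level,
 * allocating intermediate tables on the way down. For a sense of scale:
 * with generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so each read-only
 * mapping of kasan_early_shadow_page shadows PAGE_SIZE << 3 bytes of
 * kernel memory.
 */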
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}
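
/*
 * Usage sketch (illustrative only; the real call sites live in per-arch
 * setup code and differ in detail): early architecture code typically maps
 * an entire shadow sub-region to the early shadow page first, e.g.
 *
 *	kasan_populate_early_shadow(kasan_mem_to_shadow(start),
 *				    kasan_mem_to_shadow(end));
 *
 * and later replaces the parts that shadow real memory with writable
 * shadow pages of their own.
 */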
/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point. The [pud,pmd]_populate*()
			 * calls below are only needed for 2- and 3-level page
			 * tables, where we have no real puds/pmds; there,
			 * pgd_populate() and pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}
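
/*
 * The kasan_remove_*_table() helpers below undo kasan_add_zero_shadow():
 * they clear entries that point at the shared early shadow tables (or at
 * kasan_early_shadow_page itself) and, via the kasan_free_*() helpers
 * above, free intermediate page tables once every entry in them has been
 * cleared. Entries mapping anything other than the early shadow page are
 * unexpected here, hence the WARN_ON() in kasan_remove_pte_table().
 */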
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE))
				pmd_clear(pmd);
			continue;
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE))
				pud_clear(pud);
			continue;
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE))
				p4d_clear(p4d);
			continue;
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE))
				pgd_clear(pgd);
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		/*
		 * kasan_remove_zero_shadow() expects the original memory
		 * range, not shadow addresses; it translates internally.
		 */
		kasan_remove_zero_shadow(start, size);
	return ret;
}
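
/*
 * Pairing sketch (illustrative; no call site in this file): code that
 * makes a new range of memory directly accessible, such as the memory
 * hotplug/memremap paths, wires the two entry points up roughly as
 *
 *	ret = kasan_add_zero_shadow(start, size);
 *	if (ret)
 *		return ret;
 *	...
 *	kasan_remove_zero_shadow(start, size);
 *
 * where both start and size must be multiples of
 * KASAN_MEMORY_PER_SHADOW_PAGE, as the WARN_ON()s above enforce.
 */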