// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif
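/*
 * Worked example of the shadow arithmetic above, assuming the typical
 * KASAN_SHADOW_SCALE_SHIFT of 3 (one shadow byte per 8-byte granule):
 * kasan_poison() on a 32-byte range updates 32 >> 3 = 4 shadow bytes.
 * For an object whose size is not granule-aligned, e.g. 29 bytes,
 * kasan_poison_last_granule() stores 29 & 7 = 5 in the shadow byte of
 * the last granule, marking only its first 5 bytes as accessible.
 */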
void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/*
	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
	 * that calls to ksize(), where size is not a multiple of machine-word
	 * size, would otherwise poison the invalid portion of the word.
	 */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}
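/*
 * Memory hotplug notifier: map shadow for memory that is going online
 * (unless it was already mapped at boot) and free vmalloc()-backed shadow
 * again when the memory goes offline.
 */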
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
	    WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(), because the code to do that hasn't
		 * been written yet. So we'll just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0                                CPU#1
	 * WRITE_ONCE(p, vmalloc(100));          while (x = READ_ONCE(p)) ;
	 *                                       p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0                                CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val      x = LOAD p
	 * // rest of vmalloc process            <data dependency>
	 * STORE p, a                            LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by an smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}
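/*
 * A minimal sketch of the publication ordering described above
 * (hypothetical caller, not actual kernel code): the unpoisoning stores
 * to shadow must be ordered before the allocated pointer is published,
 * so that a consumer that observes the pointer also observes unpoisoned
 * shadow:
 *
 *	kasan_unpoison_vmalloc(area->addr, area->size);	// STOREs to shadow
 *	smp_wmb();		// as provided by clear_vm_uninitialized_flag()
 *	WRITE_ONCE(p, area->addr);	// publish; readers pair with READ_ONCE(p)
 */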
/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison(start, size, false);
}
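/*
 * Per-PTE callback for kasan_release_vmalloc() below: clear the shadow
 * PTE and free the backing page. The PTE is re-checked under
 * init_mm.page_table_lock so that each page is freed exactly once.
 */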
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |   /-------/     |
 *      \-------\|/------/    |/---------------/
 *              |||           ||
 *          |??AAAAAA|AAAAAAAA|AA??????| < shadow
 *              (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 * |                  start                     end      |
 * v                  v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |   /-------/     |
 *      \-------\|/------/    |/---------------/
 *              |||           ||
 *          |FFAAAAAA|AAAAAAAA|AAF?????| < shadow
 *              (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

#else /* CONFIG_KASAN_VMALLOC */

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif