// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

void kasan_poison(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = kasan_reset_tag(address);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(address))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *address, size_t size)
{
	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = kasan_reset_tag(address);

	/*
	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
	 * that calls to ksize(), where size is not a multiple of machine-word
	 * size, would otherwise poison the invalid portion of the word.
	 */
	if (is_kfence_address(address))
		return;

	/* Unpoison round_up(size, KASAN_GRANULE_SIZE) bytes. */
	kasan_poison(address, size, tag);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(address, size);
}

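/*
 * Shadow encoding example for the generic mode (illustration only, assuming
 * the usual KASAN_GRANULE_SIZE of 8 bytes): kasan_unpoison(obj, 13) on a
 * granule-aligned object first has kasan_poison() write the tag (0x00 in
 * the generic mode) to both shadow bytes covering the rounded-up 16 bytes,
 * and kasan_poison_last_granule() then stores 13 & KASAN_GRANULE_MASK == 5
 * into the second shadow byte:
 *
 *   object bytes:   [0 .. 7]  [8 .. 12 | 13 .. 15]
 *   shadow bytes:     0x00        0x05
 *
 * i.e. the first granule is fully accessible, while only the first 5 bytes
 * of the second granule are; accesses to the remaining 3 bytes are reported.
 */
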
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific and the latter depends on HUGETLB_PAGE. So let's
	 * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when previously
		 * offlined memory is being onlined again.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow;
		 * a non-NULL result from find_vm_area() tells us that this
		 * was indeed the case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet, so we just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

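/*
 * A sense of scale for the notifier above (illustrative numbers, assuming
 * 4K pages and the usual 1/8 shadow ratio, KASAN_SHADOW_SCALE_SHIFT == 3):
 * onlining a 128M memory block means mem_data->nr_pages == 32768, so
 * nr_shadow_pages == 32768 >> 3 == 4096, i.e. 16M of shadow is mapped for
 * 128M of newly plugged memory.
 */
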
static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0                                  CPU#1
	 * WRITE_ONCE(p, vmalloc(100));             while (x = READ_ONCE(p)) ;
	 *                                          p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0                                  CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val         x = LOAD p
	 * // rest of vmalloc process               <data dependency>
	 * STORE p, a                               LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

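/*
 * Note on granularity (illustration, assuming 4K pages and the 1/8 shadow
 * scale): one shadow page covers KASAN_MEMORY_PER_SHADOW_PAGE == 32K of
 * vmalloc address space, so even kasan_populate_vmalloc(addr, 100) ends up
 * backing a full 4K shadow page after the ALIGN_DOWN()/ALIGN() above. That
 * shadow page may already be in place for a neighbouring vmalloc area,
 * which is why kasan_populate_vmalloc_pte() only installs its freshly
 * allocated page if the PTE is still empty under init_mm.page_table_lock.
 */
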
/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /-------/         |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *            |??AAAAAA|AAAAAAAA|AA??????|                  < shadow
 *               (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                               free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /-------/         |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *            |FFAAAAAA|AAAAAAAA|AAF?????|                  < shadow
 *               (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

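/*
 * Worked example of the alignment logic above (illustrative offsets within
 * the vmalloc area, assuming 4K pages, so KASAN_MEMORY_PER_SHADOW_PAGE is
 * 32K): take start == 40K and end == 72K inside a free region spanning
 * [32K, 96K). Then:
 *
 *   region_start = ALIGN(40K, 32K)      == 64K
 *   region_end   = ALIGN_DOWN(72K, 32K) == 64K
 *   free_region_start aligns up to 32K, which is < 64K, and 40K != 64K,
 *      so region_start is pulled back to 32K
 *   free_region_end aligns down to 96K, which is > 64K, and 72K != 64K,
 *      so region_end is pushed out to 96K
 *
 * The shadow for [32K, 96K) - two shadow pages, each only partially covered
 * by [start, end) but entirely within the free region - is then released.
 */
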
#else /* CONFIG_KASAN_VMALLOC */

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif