// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define __KASAN_INTERNAL

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
               (ptr >= (unsigned long)&__softirqentry_text_start &&
                ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
                                             unsigned int nr_entries)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
                        return i + 1;
                }
        }
        return nr_entries;
}

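/*
 * Capture the current stack trace, truncate it at the IRQ entry point
 * (frames past it belong to the interrupted task) and store it
 * deduplicated in the stack depot.
 */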
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}

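/*
 * Marks 'size' bytes starting from 'address' as accessible again. If the
 * size is not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte
 * encodes how many bytes of the final granule are accessible (generic
 * KASAN) or repeats the pointer tag (software tag-based KASAN).
 */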
void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                    PAGE_SIZE << order,
                                    KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

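/*
 * Reserve per-object KASAN metadata when a cache is created. The object is
 * followed by a redzone that holds struct kasan_alloc_meta, optionally
 * struct kasan_free_meta, and padding up to the optimal redzone size:
 *
 *   | object | alloc meta | (free meta) | padding |
 *
 * If the metadata would push the object past KMALLOC_MAX_SIZE, the cache is
 * left without SLAB_KASAN and the original size is restored.
 */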
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                      max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
            *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
               (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page),
                            PAGE_SIZE << compound_order(page),
                            KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                            round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                            KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                     bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                        const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                 assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                       shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
        else
                return tag != (u8)shadow_byte;
}

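/*
 * Slow path of slab freeing: report an invalid free if the pointer does not
 * point to the start of an object or if the object's shadow shows it is not
 * accessible (e.g. a double-free), otherwise poison the whole object. For
 * generic KASAN the object is additionally put into the quarantine instead
 * of being returned to the allocator right away.
 */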
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
            unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

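/*
 * Common allocation hook: unpoison the requested size, poison the remainder
 * of the object as a redzone, assign (or keep) the pointer tag and record
 * the allocation stack trace.
 */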
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                             size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                 KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                               KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                            KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                     gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                  size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                        gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                 KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                            KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                       flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                    KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}

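/*
 * Map shadow memory for a module's address range. The shadow is
 * KASAN_SHADOW_SCALE_SIZE times smaller than the range it covers and is
 * initialized to KASAN_SHADOW_INIT; it is freed again via kasan_free_shadow()
 * when the module area is released.
 */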
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                                   shadow_start + shadow_size,
                                   GFP_KERNEL,
                                   PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                                   __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
        unsigned long flags = user_access_save();
        __kasan_report(addr, size, is_write, ip);
        user_access_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the former is
         * arch-specific and the latter depends on HUGETLB_PAGE. So let's
         * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

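/*
 * Memory hotplug notifier: map shadow for memory that is about to go online
 * (unless it was already mapped at boot) and free that shadow again when the
 * memory goes offline or onlining is cancelled.
 */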
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
            WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If shadow is mapped already, then it must have been mapped
                 * during boot. This can happen if we are onlining previously
                 * offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                           shadow_end, GFP_KERNEL,
                                           PAGE_KERNEL, VM_NO_GUARD,
                                           pfn_to_nid(mem_data->start_pfn),
                                           __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow;
                 * a non-NULL result from find_vm_area() tells us it was the
                 * latter case.
                 *
                 * Currently it's not possible to free shadow mapped during
                 * boot by kasan_init(): the code to do that hasn't been
                 * written yet, so we simply leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif