// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
               (ptr >= (unsigned long)&__softirqentry_text_start &&
                ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
                                             unsigned int nr_entries)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
                        return i + 1;
                }
        }
        return nr_entries;
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}
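
/*
 * For orientation (illustrative, constants are per-architecture):
 * kasan_mem_to_shadow() maps every KASAN_SHADOW_SCALE_SIZE-byte granule
 * of kernel memory to one shadow byte, roughly
 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * With generic KASAN a shadow byte of 0 means the whole granule is
 * accessible, 1..KASAN_SHADOW_SCALE_SIZE-1 means only that many leading
 * bytes are, and poison values such as KASAN_KMALLOC_REDZONE mark the
 * granule inaccessible. For example, unpoisoning 41 bytes with 8-byte
 * granules leaves five shadow bytes of 0 followed by one byte of 1
 * (41 & KASAN_SHADOW_MASK), which is what the helpers below implement.
 */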

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}
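
/*
 * As a rough guide to the adaptive policy below: a 32-byte object gets
 * at least a 16-byte redzone while a 1024-byte object gets 128 bytes,
 * and kasan_cache_create() grows the slab object size so that at least
 * this much padding (which also hosts the alloc/free metadata) follows
 * each object. Tag-based KASAN returns 0 here because out-of-bounds
 * accesses are caught by pointer/shadow tag mismatches instead.
 */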

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}
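
/*
 * Background note: with CONFIG_KASAN_SW_TAGS the tag lives in the top
 * byte of the pointer (relying on arm64 Top Byte Ignore) and the same
 * tag is stored in the shadow bytes covering the object, so an access
 * is flagged when the pointer tag and the shadow tag disagree, for
 * example a stale pointer whose tag no longer matches a re-tagged
 * object. The code below decides which tag each slab object gets.
 */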

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
        else
                return tag != (u8)shadow_byte;
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}
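
/*
 * Background note: a true return from kasan_slab_free() tells the slab
 * allocator to leave the object alone, either because the free was
 * invalid or because generic KASAN has moved the object into its
 * quarantine, delaying reuse so that use-after-free accesses keep
 * hitting poisoned shadow for a while. Tag-based KASAN returns false
 * and frees immediately, relying on the freed object's shadow tag no
 * longer matching stale pointers.
 */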

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                    KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}
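
/*
 * Background note: module shadow is allocated on demand below at
 * 1/KASAN_SHADOW_SCALE_SIZE of the mapping size, rounded up to whole
 * pages; with 8-byte granules a 1 MiB module mapping needs roughly
 * 128 KiB of shadow, vmalloc'ed at its fixed kasan_mem_to_shadow()
 * address and filled with KASAN_SHADOW_INIT.
 */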

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
        unsigned long flags = user_access_save();

        __kasan_report(addr, size, is_write, ip);
        user_access_restore(flags);
}
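
/*
 * Background note: hot-added memory has no shadow mapped for it at
 * boot, so the notifier below vmalloc's nr_pages >>
 * KASAN_SHADOW_SCALE_SHIFT shadow pages when a block goes online
 * (roughly 16 MiB of shadow for a 128 MiB block with 8-byte granules)
 * and vfree's it again on offline when it was mapped that way.
 */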

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the former is
         * arch-specific and the latter depends on HUGETLB_PAGE. So let's
         * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, it must have been mapped
                 * during boot. This can happen when we are onlining
                 * previously offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow.
                 * A non-NULL result from find_vm_area() tells us that it
                 * was indeed the latter case.
                 *
                 * Currently it's not possible to free shadow mapped
                 * during boot by kasan_init(): the code to do that hasn't
                 * been written yet, so we just leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif