// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
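
/*
 * Worked example (editor's illustration, not part of the original source):
 * assuming a 64-byte cache line and a user-specified align of 0,
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24)  returns 32
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100) returns 64
 *
 * because ralign is halved (64 -> 32) while a 24-byte object still fits in
 * half of it, whereas a 100-byte object keeps the full line. The result is
 * further raised to at least arch_slab_minalign() and rounded up to a
 * multiple of sizeof(void *), which does not change these two values on
 * common configurations.
 */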

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
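
/*
 * Illustrative usage sketch (editor's example; "foo", foo_cachep and the
 * field names are made up and not part of this file). A cache is created
 * whose "uname" field may be copied to/from user space under the hardened
 * usercopy checks, while the rest of the object may not:
 *
 *	struct foo {
 *		char uname[16];
 *		void *private_data;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, uname),
 *				sizeof_field(struct foo, uname), NULL);
 */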

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
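
/*
 * Illustrative lifecycle sketch (editor's example; "bar" and bar_cachep are
 * made up). A typical user creates a cache once, allocates and frees objects
 * from it, and destroys it only after every object has been freed:
 *
 *	static struct kmem_cache *bar_cachep;
 *
 *	bar_cachep = kmem_cache_create("bar", sizeof(struct bar), 0,
 *				       SLAB_ACCOUNT, NULL);
 *	if (!bar_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(bar_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(bar_cachep, obj);
 *
 *	kmem_cache_destroy(bar_cachep);
 */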

#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	sysfs_slab_unlink(s);
	sysfs_slab_release(s);
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int refcnt;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	refcnt = --s->refcount;
	if (refcnt)
		goto out_unlock;

	WARN(shutdown_cache(s),
	     "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!refcnt && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct folio *folio;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	folio = virt_to_folio(object);
	return folio_test_slab(folio);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);
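
/*
 * Illustrative sketch (editor's example): a debugging path that wants to
 * report provenance for an arbitrary pointer can use kmem_valid_obj() as a
 * gate before kmem_dump_obj() below, falling back to other handling for
 * non-slab memory (mem_dump_obj() in mm/util.c wraps this kind of check):
 *
 *	if (kmem_valid_obj(ptr))
 *		kmem_dump_obj(ptr);
 *	else
 *		pr_info(" not a slab object\n");
 */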

static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	slab = virt_to_slab(object);
	if (WARN_ON_ONCE(!slab)) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info(" %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info(" %pS\n", kp.kp_free_stack[i]);
	}

}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
								usersize);
	kasan_cache_create_kmalloc(s);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}
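
/*
 * Worked example (editor's illustration): with the default cache geometry,
 * a 20-byte request maps via size_index[(20 - 1) / 8] == size_index[2] == 5
 * to the kmalloc-32 cache, while a 1000-byte request is above 192 and maps
 * via fls(1000 - 1) == 10 to the kmalloc-1024 cache.
 */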

size_t kmalloc_size_roundup(size_t size)
{
	struct kmem_cache *c;

	/* Short-circuit the 0 size case. */
	if (unlikely(size == 0))
		return 0;
	/* Short-circuit saturated "too-large" case. */
	if (unlikely(size == SIZE_MAX))
		return SIZE_MAX;
	/* Above the smaller buckets, size is a multiple of page size. */
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return PAGE_SIZE << get_order(size);

	/* The flags don't matter since size_index is common to all. */
	c = kmalloc_slab(size, GFP_KERNEL);
	return c ? c->object_size : 0;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
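
/*
 * Illustrative sketch (editor's example; "want", "alloc" and "buf" are made
 * up). A caller that wants to use the full capacity of the bucket backing an
 * allocation can round the request up front, instead of discovering the
 * larger usable size after the fact with ksize():
 *
 *	size_t want = 1000;
 *	size_t alloc = kmalloc_size_roundup(want);	(1024 with default caches)
 *	void *buf = kmalloc(alloc, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	(buf may now safely be treated as an alloc-byte buffer)
 */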

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL] = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		kmalloc_caches[type][idx]->refcount = -1;
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM is defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;
}

void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	if (WARN_ON_ONCE(order == 0))
		pr_warn_once("object pointer: 0x%p\n", object);

	kmemleak_free(object);
	kasan_kfree_large(object);
	kmsan_kfree_large(object);

	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__free_pages(folio_page(folio, 0), order);
}

static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc(caller, ret, size,
			      PAGE_SIZE << get_order(size), flags, node);
		return ret;
	}

	s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	ret = kasan_kmalloc(s, ret, size, flags);
	trace_kmalloc(caller, ret, size, s->size, flags, node);
	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
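
/*
 * Worked example (editor's illustration): requests above
 * KMALLOC_MAX_CACHE_SIZE (an allocator- and page-size-dependent limit)
 * bypass the kmalloc caches entirely. With 4 KiB pages, a 100 KiB request
 * taking this path is satisfied with PAGE_SIZE << get_order(100 KiB),
 * i.e. a 128 KiB (order-5) block from the page allocator, and kfree()
 * later returns it through free_large_kmalloc().
 */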

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);

/**
 * kfree - free previously allocated memory
 * @object: pointer returned by kmalloc.
 *
 * If @object is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	s = slab->slab_cache;
	__kmem_cache_free(s, (void *)object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}

void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
					    size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_trace);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_node_trace);
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */

static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ptr = NULL;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (page) {
		ptr = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}

	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	kmsan_kmalloc_large(ptr, size, flags);

	return ptr;
}

void *kmalloc_large(size_t size, gfp_t flags)
{
	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, NUMA_NO_NODE);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large);

void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	void *ret = __kmalloc_large_node(size, flags, node);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, node);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large_node);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Here acquiring slab_mutex is risky since we don't want to
	 * sleep in the OOM path. But, without holding the mutex,
	 * traversing the list may introduce a risk of crash.
	 * Use mutex_trylock to protect the list traversal and dump
	 * nothing if the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Check for double-free before calling ksize. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
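
/*
 * Illustrative sketch (editor's example; "buf", "new_buf" and "new_len" are
 * made up). Because krealloc() may return a new pointer, or %NULL on failure
 * while leaving the old allocation intact, callers should assign through a
 * temporary:
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;		(the old buf is still valid here)
 *	buf = new_buf;
 */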

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than one
	 * printed later, when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);