/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
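
/*
 * Illustrative sketch of using the public bulk API that this generic
 * fallback backs ("my_cache" and the array size are hypothetical): either
 * all requested objects are allocated and the count is returned, or
 * everything allocated so far is freed again and 0 is returned.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */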

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

LIST_HEAD(slab_root_caches);

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
	INIT_LIST_HEAD(&s->memcg_params.children);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (root_cache) {
		s->memcg_params.root_cache = root_cache;
		s->memcg_params.memcg = memcg;
		INIT_LIST_HEAD(&s->memcg_params.children_node);
		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kvzalloc(sizeof(struct memcg_cache_array) +
		       memcg_nr_cache_ids * sizeof(void *),
		       GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static void free_memcg_params(struct rcu_head *rcu)
{
	struct memcg_cache_array *old;

	old = container_of(rcu, struct memcg_cache_array, rcu);
	kvfree(old);
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	new = kvzalloc(sizeof(struct memcg_cache_array) +
		       new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		call_rcu(&old->rcu, free_memcg_params);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}

void memcg_link_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_add(&s->root_caches_node, &slab_root_caches);
	} else {
		list_add(&s->memcg_params.children_node,
			 &s->memcg_params.root_cache->memcg_params.children);
		list_add(&s->memcg_params.kmem_caches_node,
			 &s->memcg_params.memcg->kmem_caches);
	}
}

static void memcg_unlink_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_del(&s->root_caches_node);
	} else {
		list_del(&s->memcg_params.children_node);
		list_del(&s->memcg_params.kmem_caches_node);
	}
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_unlink_cache(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
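
/*
 * Example (illustrative sketch; the cache names are hypothetical): with
 * merging enabled, two caches that differ only in name may end up backed by
 * the same kmem_cache:
 *
 *	struct kmem_cache *a = kmem_cache_create("foo_cache", 56, 0, 0, NULL);
 *	struct kmem_cache *b = kmem_cache_create("bar_cache", 56, 0, 0, NULL);
 *
 * If "foo_cache" passes the checks above (same rounded size, compatible
 * alignment, no ctor, no SLAB_NEVER_MERGE flags), the second create may
 * alias it, so both pointers refer to one underlying cache whose refcount is
 * bumped instead. Booting with "slab_nomerge" disables this behaviour.
 */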

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
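
/*
 * Worked example (assuming a 64-byte cache line and 8-byte pointers): for
 * SLAB_HWCACHE_ALIGN with size = 20 and align = 0, ralign starts at 64 and
 * is halved while the object fits in half of it: 64 -> 32 (20 <= 32), then
 * the loop stops because 20 > 16, so the resulting alignment is 32. A
 * 100-byte object under the same flags keeps the full 64-byte alignment.
 */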

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
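
/*
 * Example usage from a hypothetical module init/exit path (illustrative
 * sketch; the object type, cache name and constructor are made up):
 *
 *	struct my_obj {
 *		struct list_head node;
 *		int value;
 *	};
 *
 *	static void my_obj_ctor(void *p)
 *	{
 *		struct my_obj *obj = p;
 *
 *		INIT_LIST_HEAD(&obj->node);
 *	}
 *
 *	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *					 0, SLAB_HWCACHE_ALIGN, my_obj_ctor);
 *	if (!my_obj_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(my_obj_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_obj_cache, obj);
 *	kmem_cache_destroy(my_obj_cache);
 *
 * Note that the constructor runs when new slab pages are populated, not on
 * every allocation, so objects must be returned to the cache in a
 * constructor-consistent state.
 */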

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced while
	 * freeing the pages, so the kmem_caches should be freed only after
	 * the pending RCU operations are finished. As rcu_barrier() is a
	 * pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	memcg_unlink_cache(s);
	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}
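
/*
 * Background sketch of why the rcu_barrier() above is needed (the lookup
 * helper and cache below are hypothetical): SLAB_TYPESAFE_BY_RCU lets
 * lockless readers touch an object that may already have been freed and
 * reused, as long as the slab page itself is not handed back to the page
 * allocator before a grace period has elapsed:
 *
 *	rcu_read_lock();
 *	obj = lookup_object(key);
 *	if (obj && atomic_inc_not_zero(&obj->refcnt)) {
 *		... obj is pinned, but must be re-checked against key
 *		    since the memory may have been recycled ...
 *	}
 *	rcu_read_unlock();
 *
 * The pages of such a cache are freed via RCU callbacks that still reference
 * the kmem_cache, so the kmem_cache structure must outlive those callbacks;
 * hence the batched destruction waits for rcu_barrier() before releasing the
 * caches.
 */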

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->ctor, memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_deactivate_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(work, struct kmem_cache,
					    memcg_params.deact_work);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->memcg_params.deact_fn(s);

	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	/* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
	css_put(&s->memcg_params.memcg->css);
}

static void kmemcg_deactivate_rcufn(struct rcu_head *head)
{
	struct kmem_cache *s = container_of(head, struct kmem_cache,
					    memcg_params.deact_rcu_head);

	/*
	 * We need to grab blocking locks. Bounce to ->deact_work. The
	 * work item shares the space with the RCU head and can't be
	 * initialized earlier.
	 */
	INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
	queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
}

/**
 * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
 *					   sched RCU grace period
 * @s: target kmem_cache
 * @deact_fn: deactivation function to call
 *
 * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
 * held after a sched RCU grace period. The slab is guaranteed to stay
 * alive until @deact_fn is finished. This is to be used from
 * __kmemcg_cache_deactivate().
 */
void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
					   void (*deact_fn)(struct kmem_cache *))
{
	if (WARN_ON_ONCE(is_root_cache(s)) ||
	    WARN_ON_ONCE(s->memcg_params.deact_fn))
		return;

	/* pin memcg so that @s doesn't get destroyed in the middle */
	css_get(&s->memcg_params.memcg->css);

	s->memcg_params.deact_fn = deact_fn;
	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}
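
/*
 * Illustrative sketch of a caller (the two-step helper shown here is
 * hypothetical; the real deactivation code lives in the individual
 * allocator): an allocator's __kmemcg_cache_deactivate() can do the
 * non-blocking part of the shutdown immediately and defer the rest until
 * in-flight lockless accesses have drained:
 *
 *	static void my_deactivate_part2(struct kmem_cache *s)
 *	{
 *		... runs with cpus/mems online and slab_mutex held ...
 *	}
 *
 *	void __kmemcg_cache_deactivate(struct kmem_cache *s)
 *	{
 *		... stop new allocations from s ...
 *		slab_deactivate_memcg_cache_rcu_sched(s, my_deactivate_part2);
 *	}
 */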

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmemcg_cache_deactivate(c);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
				 memcg_params.kmem_caches_node) {
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(shutdown_cache(s));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int shutdown_memcg_caches(struct kmem_cache *s)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (shutdown_cache(c))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.children_node, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
				 memcg_params.children_node)
		shutdown_cache(c);

	list_splice(&busy, &s->memcg_params.children);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.children))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s);
	if (!err)
		err = shutdown_cache(s);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
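
/*
 * Worked example (default index layout, KMALLOC_MIN_SIZE == 8): a request
 * for 20 bytes gives size_index_elem(20) == (20 - 1) / 8 == 2 and
 * size_index[2] == 5, i.e. the kmalloc-32 cache. A 100-byte request gives
 * element 12 -> size_index[12] == 7, i.e. kmalloc-128. Above 192 bytes the
 * index comes straight from fls(): a 200-byte request yields fls(199) == 8,
 * i.e. kmalloc-256.
 */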

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
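
/*
 * Worked example: on a configuration where KMALLOC_MIN_SIZE is 64, the first
 * loop above points every small size at the kmalloc-64 cache
 * (KMALLOC_SHIFT_LOW == 6), and the second loop redirects 72..96 bytes to
 * index 7 (kmalloc-128), since a 96-byte cache cannot provide 64-byte
 * alignment. On the common KMALLOC_MIN_SIZE == 8 configurations all three
 * loops are skipped and size_index[] is used exactly as initialized above.
 */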

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
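
/*
 * Worked example (assuming SLUB with 4K pages, where kmalloc() is expected
 * to route anything above KMALLOC_MAX_CACHE_SIZE here): a 70000-byte request
 * arrives with order = get_order(70000) = 5, i.e. 32 contiguous pages
 * (131072 bytes) from the page allocator. Because the pages are allocated
 * with __GFP_COMP, kfree() can later recover the order from the compound
 * page head and free the whole block.
 */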

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			size_t count)
{
	size_t i;
	unsigned int rand;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_root_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_root_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);

	if (p == slab_root_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
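
/*
 * For illustration, a typical /proc/slabinfo row produced by cache_show()
 * could look like this (the numbers are hypothetical):
 *
 *	kmalloc-256         512    640    256   16    1 : tunables 0 0 0 : slabdata 40 40 0
 *
 * i.e. 512 of 640 allocated kmalloc-256 objects are in use, 16 objects fit
 * in each order-0 slab, and 40 slabs exist. The tunables column is only
 * meaningful for CONFIG_SLAB; SLUB reports zeroes there.
 */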

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	mutex_lock(&slab_mutex);
	return seq_list_start(&memcg->kmem_caches, *pos);
}

void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	return seq_list_next(p, &memcg->kmem_caches, pos);
}

void memcg_slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache,
					  memcg_params.kmem_caches_node);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == memcg->kmem_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
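
/*
 * Example usage (illustrative; the buffer and length variables are
 * hypothetical). As with userspace realloc(), keep the old pointer until the
 * call is known to have succeeded, because the result may be a different
 * allocation or NULL:
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;
 *	buf = new_buf;
 *
 * On failure the original buffer is left untouched and still has to be freed
 * by its owner. Passing a non-NULL pointer with @new_size == 0 frees the
 * buffer and returns ZERO_SIZE_PTR.
 */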

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);