// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches or for debugging)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except the per cpu partial list. The processor that froze
 *   the slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. E.g.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *   On PREEMPT_RT, the local lock does not actually disable irqs (and thus
 *   prevent the lockless operations), so fastpath operations also need to take
 *   the lock and are no longer lockless.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

/*
 * We could simply use migrate_disable()/enable() but, as long as that is a
 * function call even on !PREEMPT_RT, use the inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#endif
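
/*
 * Illustrative usage sketch (not lifted verbatim from the fastpaths): a
 * slowpath section that must stay on one cpu brackets its work like
 *
 *	struct kmem_cache_cpu *c;
 *
 *	c = slub_get_cpu_ptr(s->cpu_slab);
 *	... c stays valid, no migration can occur ...
 *	slub_put_cpu_ptr(s->cpu_slab);
 *
 * which costs an inline preempt_disable()/enable() on !PREEMPT_RT and
 * only disables migration on PREEMPT_RT, matching the comment above.
 */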

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif		/* CONFIG_SLUB_DEBUG */

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's minimum order would
 * increase due to the metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
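
/*
 * Sketch of the hardened round trip (illustrative only): with
 * CONFIG_SLAB_FREELIST_HARDENED, set_freepointer() stores
 *
 *	stored = (unsigned long)fp ^ s->random ^ swab(freeptr_addr);
 *
 * and get_freepointer() applies the identical transform to decode:
 *
 *	decoded = stored ^ s->random ^ swab(freeptr_addr);	// == fp
 *
 * since XOR with the same keys is an involution. Byte-swapping the
 * storage address mixes its variable low bits into the high bytes of
 * the stored value, where a kernel pointer's bits are otherwise mostly
 * constant, making it harder to recover s->random from a single leak.
 */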

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
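
/*
 * Worked example (illustrative numbers, assuming 4K PAGE_SIZE): for an
 * order-1 slab of 64-byte objects, order_objects(1, 64) == 8192 / 64 ==
 * 128, so oo_make(1, 64) stores (1 << OO_SHIFT) + 128 in x.x, and
 * oo_order()/oo_objects() recover 1 and 128 by shifting by OO_SHIFT and
 * masking with OO_MASK respectively.
 */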

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Per slab locking using the pagelock
 */
static __always_inline void __slab_lock(struct slab *slab)
{
	struct page *page = slab_page(slab);

	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void __slab_unlock(struct slab *slab)
{
	struct page *page = slab_page(slab);

	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
	__slab_lock(slab);
}

static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
{
	__slab_unlock(slab);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
 * so we disable interrupts as part of slab_[un]lock().
 */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		lockdep_assert_irqs_disabled();
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&slab->freelist, &slab->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		/* init to 0 to prevent spurious warnings */
		unsigned long flags = 0;

		slab_lock(slab, &flags);
		if (slab->freelist == freelist_old &&
					slab->counters == counters_old) {
			slab->freelist = freelist_new;
			slab->counters = counters_new;
			slab_unlock(slab, &flags);
			return true;
		}
		slab_unlock(slab, &flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&slab->freelist, &slab->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		__slab_lock(slab);
		if (slab->freelist == freelist_old &&
					slab->counters == counters_old) {
			slab->freelist = freelist_new;
			slab->counters = counters_new;
			__slab_unlock(slab);
			local_irq_restore(flags);
			return true;
		}
		__slab_unlock(slab);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
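
/*
 * Conceptually, both helpers above perform one double-word
 * compare-and-swap over the adjacent freelist and counters fields,
 * i.e. (illustrative sketch, ignoring what makes it atomic):
 *
 *	if (slab->freelist == freelist_old &&
 *	    slab->counters == counters_old) {
 *		slab->freelist = freelist_new;
 *		slab->counters = counters_new;
 *		return true;
 *	}
 *	return false;
 *
 * Atomicity comes either from a hardware cmpxchg_double when
 * __CMPXCHG_DOUBLE is set, or from taking the slab lock otherwise; the
 * __cmpxchg_double_slab() variant additionally expects the caller to
 * have interrupts disabled (except on PREEMPT_RT).
 */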

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_RAW_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (likely(!current->kunit_test))
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

/*
 * Determine a map of objects in use in a slab.
 *
 * Node listlock must be held to guarantee that the slab does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
	__acquires(&object_map_lock)
{
	VM_BUG_ON(!irqs_disabled());

	raw_spin_lock(&object_map_lock);

	__fill_map(object_map, s, slab);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	raw_spin_unlock(&object_map_lock);
}

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}
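
/*
 * Typical usage pattern (as in check_bytes_and_report() and
 * slab_pad_check() below):
 *
 *	metadata_access_enable();
 *	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 *	metadata_access_disable();
 */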

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
missing\n"); 797 #endif 798 } 799 800 void print_tracking(struct kmem_cache *s, void *object) 801 { 802 unsigned long pr_time = jiffies; 803 if (!(s->flags & SLAB_STORE_USER)) 804 return; 805 806 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); 807 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 808 } 809 810 static void print_slab_info(const struct slab *slab) 811 { 812 struct folio *folio = (struct folio *)slab_folio(slab); 813 814 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n", 815 slab, slab->objects, slab->inuse, slab->freelist, 816 folio_flags(folio, 0)); 817 } 818 819 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 820 { 821 struct va_format vaf; 822 va_list args; 823 824 va_start(args, fmt); 825 vaf.fmt = fmt; 826 vaf.va = &args; 827 pr_err("=============================================================================\n"); 828 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 829 pr_err("-----------------------------------------------------------------------------\n\n"); 830 va_end(args); 831 } 832 833 __printf(2, 3) 834 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 835 { 836 struct va_format vaf; 837 va_list args; 838 839 if (slab_add_kunit_errors()) 840 return; 841 842 va_start(args, fmt); 843 vaf.fmt = fmt; 844 vaf.va = &args; 845 pr_err("FIX %s: %pV\n", s->name, &vaf); 846 va_end(args); 847 } 848 849 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) 850 { 851 unsigned int off; /* Offset of last byte */ 852 u8 *addr = slab_address(slab); 853 854 print_tracking(s, p); 855 856 print_slab_info(slab); 857 858 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 859 p, p - addr, get_freepointer(s, p)); 860 861 if (s->flags & SLAB_RED_ZONE) 862 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 863 s->red_left_pad); 864 else if (p > addr + 16) 865 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 866 867 print_section(KERN_ERR, "Object ", p, 868 min_t(unsigned int, s->object_size, PAGE_SIZE)); 869 if (s->flags & SLAB_RED_ZONE) 870 print_section(KERN_ERR, "Redzone ", p + s->object_size, 871 s->inuse - s->object_size); 872 873 off = get_info_end(s); 874 875 if (s->flags & SLAB_STORE_USER) 876 off += 2 * sizeof(struct track); 877 878 off += kasan_metadata_size(s); 879 880 if (off != size_from_object(s)) 881 /* Beginning of the filler is the free pointer */ 882 print_section(KERN_ERR, "Padding ", p + off, 883 size_from_object(s) - off); 884 885 dump_stack(); 886 } 887 888 static void object_err(struct kmem_cache *s, struct slab *slab, 889 u8 *object, char *reason) 890 { 891 if (slab_add_kunit_errors()) 892 return; 893 894 slab_bug(s, "%s", reason); 895 print_trailer(s, slab, object); 896 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 897 } 898 899 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, 900 void **freelist, void *nextfree) 901 { 902 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 903 !check_valid_pointer(s, slab, nextfree) && freelist) { 904 object_err(s, slab, *freelist, "Freechain corrupt"); 905 *freelist = NULL; 906 slab_fix(s, "Isolate corrupted freechain"); 907 return true; 908 } 909 910 return false; 911 } 912 913 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, 914 const char *fmt, ...) 
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_slab_info(slab);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are
 * mostly ignored, and therefore no slab options that rely on these
 * boundaries may be used with merged slabcaches.
 */
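
/*
 * Hypothetical instance of the layout above (illustrative numbers only,
 * assuming 64-bit pointers): a cache created with object_size == 20 and
 * full debugging (SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER) might
 * end up with inuse == 24, giving
 *
 *	[s->red_left_pad bytes of left redzone]
 *	object + 0  .. object + 19	payload (poisoned while free)
 *	object + 20 .. object + 23	word-align padding, used as redzone
 *	object + 24 .. object + 31	free pointer (outside the object)
 *	object + 32 .. 			two struct track records, then
 *					POISON_INUSE padding up to s->size
 *
 * The exact offsets are decided by calculate_sizes() and differ per
 * configuration; this is only meant to make the description concrete.
 */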

static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static void slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, slab, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, slab, p, "End Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, slab, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			slab->inuse, slab->objects);
		return 0;
	}
	/* slab_pad_check() fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct slab *slab,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, slab, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (folio_test_slab(slab_folio(slab))) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		slab->inuse = slab->objects;
		slab->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!slab->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct slab *slab,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	void *object = head;
	int cnt = 0;
	unsigned long flags, flags2;
	int ret = 0;
	depot_stack_handle_t handle = 0;

	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(slab, &flags2);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, slab))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, slab, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);
	trace(s, slab, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(slab, &flags2);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

/*
 * Parse a block of slub_debug options. Blocks are delimited by ';'
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
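
/*
 * Example strings accepted by the parser above (illustrative):
 *
 *	slub_debug=FZ			consistency checks and red zoning
 *					for all caches
 *	slub_debug=,kmalloc-64		full debugging (DEBUG_DEFAULT_FLAGS),
 *					but only for the kmalloc-64 cache
 *	slub_debug=F,dentry;Z,kmalloc-*	two ';'-separated blocks, each with
 *					its own flags and slab list
 */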

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_want_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_want_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slub_debug", setup_slub_debug);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @object_size:	the size of an object without meta data
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
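
/*
 * Matching example (illustrative): booting with
 * slub_debug=F,dentry;Z,kmalloc-* and creating a cache named
 * "kmalloc-192" walks both blocks above; the "dentry" entry fails the
 * strncmp(), but "kmalloc-*" contains a glob so only the "kmalloc-"
 * prefix is compared, and the cache gets SLAB_RED_ZONE OR-ed into its
 * flags.
 */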
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
static inline
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct slab *slab, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct slab *slab,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
static inline int check_object(struct kmem_cache *s, struct slab *slab,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct slab *slab) {}
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif /* CONFIG_SLUB_DEBUG */

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static __always_inline bool slab_free_hook(struct kmem_cache *s,
						void *x, bool init)
{
	kmemleak_free_recursive(x, s->flags);

	debug_check_no_locks_freed(x, s->object_size);

	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_free and initialization memset's must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * The initialization memset's clear the object and the metadata,
	 * but don't touch the SLAB redzone.
	 */
	if (init) {
		int rsize;

		if (!kasan_has_integrated_init())
			memset(kasan_reset_tag(x), 0, s->object_size);
		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
		       s->size - s->inuse - rsize);
	}
	/* KASAN might put x into memory quarantine, delaying its reuse. */
	return kasan_slab_free(s, x, init);
}

static inline bool slab_free_freelist_hook(struct kmem_cache *s,
					   void **head, void **tail,
					   int *cnt)
{
	void *object;
	void *next = *head;
	void *old_tail = *tail ? *tail : *head;

	if (is_kfence_address(next)) {
		slab_free_hook(s, next, false);
		return true;
	}

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		next = get_freepointer(s, object);

		/* If object's reuse doesn't have to be delayed */
		if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		} else {
			/*
			 * Adjust the reconstructed freelist depth
			 * accordingly if object's reuse is delayed.
			 */
			--(*cnt);
		}
	} while (object != old_tail);

	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}
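
/*
 * Illustrative example: a bulk free may pass in a detached freelist of
 * three objects A -> B -> C (*head == A, *tail == C, *cnt == 3). If
 * only B's reuse must be delayed (e.g. by the KASAN quarantine), the
 * loop above rebuilds the list as C -> A (*head == C, *tail == A) and
 * drops *cnt to 2; B is handed over to KASAN instead of being returned
 * to the slab freelist.
 */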

static void *setup_object(struct kmem_cache *s, void *object)
{
	setup_object_debug(s, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
	return object;
}

/*
 * Slab allocation and freeing
 */
static inline struct slab *alloc_slab_page(gfp_t flags, int node,
		struct kmem_cache_order_objects oo)
{
	struct folio *folio;
	struct slab *slab;
	unsigned int order = oo_order(oo);

	if (node == NUMA_NO_NODE)
		folio = (struct folio *)alloc_pages(flags, order);
	else
		folio = (struct folio *)__alloc_pages_node(node, flags, order);

	if (!folio)
		return NULL;

	slab = folio_slab(folio);
	__folio_set_slab(folio);
	if (page_is_pfmemalloc(folio_page(folio, 0)))
		slab_set_pfmemalloc(slab);

	return slab;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		unsigned int i;

		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}
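
/*
 * Illustrative example: with freelist_count == 4 and a (pre-scaled)
 * random_seq of {2*size, 0, 3*size, 1*size}, starting at pos == 1 the
 * calls above return start + 0, start + 3*size, start + 1*size and
 * start + 2*size in turn, wrapping pos back to 0 once it reaches
 * freelist_count.
 */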

/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (slab->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;

	page_limit = slab->objects * s->size;
	start = fixup_red_left(s, slab_address(slab));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, slab, &pos, start, page_limit,
				freelist_count);
	cur = setup_object(s, cur);
	slab->freelist = cur;

	for (idx = 1; idx < slab->objects; idx++) {
		next = next_freelist_entry(s, slab, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct slab *slab;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;

	slab = alloc_slab_page(alloc_gfp, node, oo);
	if (unlikely(!slab)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
1961 * Try a lower order alloc if possible 1962 */ 1963 slab = alloc_slab_page(alloc_gfp, node, oo); 1964 if (unlikely(!slab)) 1965 goto out; 1966 stat(s, ORDER_FALLBACK); 1967 } 1968 1969 slab->objects = oo_objects(oo); 1970 1971 account_slab(slab, oo_order(oo), s, flags); 1972 1973 slab->slab_cache = s; 1974 1975 kasan_poison_slab(slab); 1976 1977 start = slab_address(slab); 1978 1979 setup_slab_debug(s, slab, start); 1980 1981 shuffle = shuffle_freelist(s, slab); 1982 1983 if (!shuffle) { 1984 start = fixup_red_left(s, start); 1985 start = setup_object(s, start); 1986 slab->freelist = start; 1987 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 1988 next = p + s->size; 1989 next = setup_object(s, next); 1990 set_freepointer(s, p, next); 1991 p = next; 1992 } 1993 set_freepointer(s, p, NULL); 1994 } 1995 1996 slab->inuse = slab->objects; 1997 slab->frozen = 1; 1998 1999 out: 2000 if (!slab) 2001 return NULL; 2002 2003 inc_slabs_node(s, slab_nid(slab), slab->objects); 2004 2005 return slab; 2006 } 2007 2008 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2009 { 2010 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2011 flags = kmalloc_fix_flags(flags); 2012 2013 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2014 2015 return allocate_slab(s, 2016 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2017 } 2018 2019 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2020 { 2021 struct folio *folio = slab_folio(slab); 2022 int order = folio_order(folio); 2023 int pages = 1 << order; 2024 2025 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2026 void *p; 2027 2028 slab_pad_check(s, slab); 2029 for_each_object(p, s, slab_address(slab), slab->objects) 2030 check_object(s, slab, p, SLUB_RED_INACTIVE); 2031 } 2032 2033 __slab_clear_pfmemalloc(slab); 2034 __folio_clear_slab(folio); 2035 folio->mapping = NULL; 2036 if (current->reclaim_state) 2037 current->reclaim_state->reclaimed_slab += pages; 2038 unaccount_slab(slab, order, s); 2039 __free_pages(folio_page(folio, 0), order); 2040 } 2041 2042 static void rcu_free_slab(struct rcu_head *h) 2043 { 2044 struct slab *slab = container_of(h, struct slab, rcu_head); 2045 2046 __free_slab(slab->slab_cache, slab); 2047 } 2048 2049 static void free_slab(struct kmem_cache *s, struct slab *slab) 2050 { 2051 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { 2052 call_rcu(&slab->rcu_head, rcu_free_slab); 2053 } else 2054 __free_slab(s, slab); 2055 } 2056 2057 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2058 { 2059 dec_slabs_node(s, slab_nid(slab), slab->objects); 2060 free_slab(s, slab); 2061 } 2062 2063 /* 2064 * Management of partially allocated slabs. 2065 */ 2066 static inline void 2067 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2068 { 2069 n->nr_partial++; 2070 if (tail == DEACTIVATE_TO_TAIL) 2071 list_add_tail(&slab->slab_list, &n->partial); 2072 else 2073 list_add(&slab->slab_list, &n->partial); 2074 } 2075 2076 static inline void add_partial(struct kmem_cache_node *n, 2077 struct slab *slab, int tail) 2078 { 2079 lockdep_assert_held(&n->list_lock); 2080 __add_partial(n, slab, tail); 2081 } 2082 2083 static inline void remove_partial(struct kmem_cache_node *n, 2084 struct slab *slab) 2085 { 2086 lockdep_assert_held(&n->list_lock); 2087 list_del(&slab->slab_list); 2088 n->nr_partial--; 2089 } 2090 2091 /* 2092 * Remove slab from the partial list, freeze it and 2093 * return the pointer to the freelist. 
2094 * 2095 * Returns a list of objects or NULL if it fails. 2096 */ 2097 static inline void *acquire_slab(struct kmem_cache *s, 2098 struct kmem_cache_node *n, struct slab *slab, 2099 int mode) 2100 { 2101 void *freelist; 2102 unsigned long counters; 2103 struct slab new; 2104 2105 lockdep_assert_held(&n->list_lock); 2106 2107 /* 2108 * Zap the freelist and set the frozen bit. 2109 * The old freelist is the list of objects for the 2110 * per cpu allocation list. 2111 */ 2112 freelist = slab->freelist; 2113 counters = slab->counters; 2114 new.counters = counters; 2115 if (mode) { 2116 new.inuse = slab->objects; 2117 new.freelist = NULL; 2118 } else { 2119 new.freelist = freelist; 2120 } 2121 2122 VM_BUG_ON(new.frozen); 2123 new.frozen = 1; 2124 2125 if (!__cmpxchg_double_slab(s, slab, 2126 freelist, counters, 2127 new.freelist, new.counters, 2128 "acquire_slab")) 2129 return NULL; 2130 2131 remove_partial(n, slab); 2132 WARN_ON(!freelist); 2133 return freelist; 2134 } 2135 2136 #ifdef CONFIG_SLUB_CPU_PARTIAL 2137 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2138 #else 2139 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2140 int drain) { } 2141 #endif 2142 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2143 2144 /* 2145 * Try to allocate a partial slab from a specific node. 2146 */ 2147 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 2148 struct slab **ret_slab, gfp_t gfpflags) 2149 { 2150 struct slab *slab, *slab2; 2151 void *object = NULL; 2152 unsigned long flags; 2153 unsigned int partial_slabs = 0; 2154 2155 /* 2156 * Racy check. If we mistakenly see no partial slabs then we 2157 * just allocate an empty slab. If we mistakenly try to get a 2158 * partial slab and there is none available then get_partial() 2159 * will return NULL. 2160 */ 2161 if (!n || !n->nr_partial) 2162 return NULL; 2163 2164 spin_lock_irqsave(&n->list_lock, flags); 2165 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2166 void *t; 2167 2168 if (!pfmemalloc_match(slab, gfpflags)) 2169 continue; 2170 2171 t = acquire_slab(s, n, slab, object == NULL); 2172 if (!t) 2173 break; 2174 2175 if (!object) { 2176 *ret_slab = slab; 2177 stat(s, ALLOC_FROM_PARTIAL); 2178 object = t; 2179 } else { 2180 put_cpu_partial(s, slab, 0); 2181 stat(s, CPU_PARTIAL_NODE); 2182 partial_slabs++; 2183 } 2184 #ifdef CONFIG_SLUB_CPU_PARTIAL 2185 if (!kmem_cache_has_cpu_partial(s) 2186 || partial_slabs > s->cpu_partial_slabs / 2) 2187 break; 2188 #else 2189 break; 2190 #endif 2191 2192 } 2193 spin_unlock_irqrestore(&n->list_lock, flags); 2194 return object; 2195 } 2196 2197 /* 2198 * Get a slab from somewhere. Search in increasing NUMA distances. 2199 */ 2200 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2201 struct slab **ret_slab) 2202 { 2203 #ifdef CONFIG_NUMA 2204 struct zonelist *zonelist; 2205 struct zoneref *z; 2206 struct zone *zone; 2207 enum zone_type highest_zoneidx = gfp_zone(flags); 2208 void *object; 2209 unsigned int cpuset_mems_cookie; 2210 2211 /* 2212 * The defrag ratio allows a configuration of the tradeoffs between 2213 * inter node defragmentation and node local allocations. A lower 2214 * defrag_ratio increases the tendency to do local allocations 2215 * instead of attempting to obtain partial slabs from other nodes. 2216 * 2217 * If the defrag_ratio is set to 0 then kmalloc() always 2218 * returns node local objects. 
If the ratio is higher then kmalloc() 2219 * may return off node objects because partial slabs are obtained 2220 * from other nodes and filled up. 2221 * 2222 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2223 * (which makes defrag_ratio = 1000) then every (well almost) 2224 * allocation will first attempt to defrag slab caches on other nodes. 2225 * This means scanning over all nodes to look for partial slabs which 2226 * may be expensive if we do it every time we are trying to find a slab 2227 * with available objects. 2228 */ 2229 if (!s->remote_node_defrag_ratio || 2230 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2231 return NULL; 2232 2233 do { 2234 cpuset_mems_cookie = read_mems_allowed_begin(); 2235 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2236 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2237 struct kmem_cache_node *n; 2238 2239 n = get_node(s, zone_to_nid(zone)); 2240 2241 if (n && cpuset_zone_allowed(zone, flags) && 2242 n->nr_partial > s->min_partial) { 2243 object = get_partial_node(s, n, ret_slab, flags); 2244 if (object) { 2245 /* 2246 * Don't check read_mems_allowed_retry() 2247 * here - if mems_allowed was updated in 2248 * parallel, that was a harmless race 2249 * between allocation and the cpuset 2250 * update 2251 */ 2252 return object; 2253 } 2254 } 2255 } 2256 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2257 #endif /* CONFIG_NUMA */ 2258 return NULL; 2259 } 2260 2261 /* 2262 * Get a partial slab, lock it and return it. 2263 */ 2264 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2265 struct slab **ret_slab) 2266 { 2267 void *object; 2268 int searchnode = node; 2269 2270 if (node == NUMA_NO_NODE) 2271 searchnode = numa_mem_id(); 2272 2273 object = get_partial_node(s, get_node(s, searchnode), ret_slab, flags); 2274 if (object || node != NUMA_NO_NODE) 2275 return object; 2276 2277 return get_any_partial(s, flags, ret_slab); 2278 } 2279 2280 #ifdef CONFIG_PREEMPTION 2281 /* 2282 * Calculate the next globally unique transaction for disambiguation 2283 * during cmpxchg. The transactions start with the cpu number and are then 2284 * incremented by CONFIG_NR_CPUS. 2285 */ 2286 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2287 #else 2288 /* 2289 * No preemption supported therefore also no need to check for 2290 * different cpus. 2291 */ 2292 #define TID_STEP 1 2293 #endif 2294 2295 static inline unsigned long next_tid(unsigned long tid) 2296 { 2297 return tid + TID_STEP; 2298 } 2299 2300 #ifdef SLUB_DEBUG_CMPXCHG 2301 static inline unsigned int tid_to_cpu(unsigned long tid) 2302 { 2303 return tid % TID_STEP; 2304 } 2305 2306 static inline unsigned long tid_to_event(unsigned long tid) 2307 { 2308 return tid / TID_STEP; 2309 } 2310 #endif 2311 2312 static inline unsigned int init_tid(int cpu) 2313 { 2314 return cpu; 2315 } 2316 2317 static inline void note_cmpxchg_failure(const char *n, 2318 const struct kmem_cache *s, unsigned long tid) 2319 { 2320 #ifdef SLUB_DEBUG_CMPXCHG 2321 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2322 2323 pr_info("%s %s: cmpxchg redo ", n, s->name); 2324 2325 #ifdef CONFIG_PREEMPTION 2326 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2327 pr_warn("due to cpu change %d -> %d\n", 2328 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2329 else 2330 #endif 2331 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2332 pr_warn("due to cpu running other code. 
Event %ld->%ld\n",
2333 tid_to_event(tid), tid_to_event(actual_tid));
2334 else
2335 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2336 actual_tid, tid, next_tid(tid));
2337 #endif
2338 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2339 }
2340
2341 static void init_kmem_cache_cpus(struct kmem_cache *s)
2342 {
2343 int cpu;
2344 struct kmem_cache_cpu *c;
2345
2346 for_each_possible_cpu(cpu) {
2347 c = per_cpu_ptr(s->cpu_slab, cpu);
2348 local_lock_init(&c->lock);
2349 c->tid = init_tid(cpu);
2350 }
2351 }
2352
2353 /*
2354 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
2355 * unfreezes the slab and puts it on the proper list.
2356 * Assumes the slab has already been safely taken away from kmem_cache_cpu
2357 * by the caller.
2358 */
2359 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
2360 void *freelist)
2361 {
2362 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
2363 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
2364 int free_delta = 0;
2365 enum slab_modes mode = M_NONE;
2366 void *nextfree, *freelist_iter, *freelist_tail;
2367 int tail = DEACTIVATE_TO_HEAD;
2368 unsigned long flags = 0;
2369 struct slab new;
2370 struct slab old;
2371
2372 if (slab->freelist) {
2373 stat(s, DEACTIVATE_REMOTE_FREES);
2374 tail = DEACTIVATE_TO_TAIL;
2375 }
2376
2377 /*
2378 * Stage one: Count the objects on cpu's freelist as free_delta and
2379 * remember the last object in freelist_tail for later splicing.
2380 */
2381 freelist_tail = NULL;
2382 freelist_iter = freelist;
2383 while (freelist_iter) {
2384 nextfree = get_freepointer(s, freelist_iter);
2385
2386 /*
2387 * If 'nextfree' is invalid, it is possible that the object at
2388 * 'freelist_iter' is already corrupted. So isolate all objects
2389 * starting at 'freelist_iter' by skipping them.
2390 */
2391 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
2392 break;
2393
2394 freelist_tail = freelist_iter;
2395 free_delta++;
2396
2397 freelist_iter = nextfree;
2398 }
2399
2400 /*
2401 * Stage two: Unfreeze the slab while splicing the per-cpu
2402 * freelist to the head of slab's freelist.
2403 *
2404 * Ensure that the slab is unfrozen while the list presence
2405 * reflects the actual number of objects during unfreeze.
2406 *
2407 * We first perform cmpxchg holding the lock and insert to the list
2408 * when it succeeds. If there is a mismatch then the slab is not
2409 * unfrozen and the number of objects in the slab may have changed.
2410 * Then release the lock and retry the cmpxchg again.
2411 */
2412 redo:
2413
2414 old.freelist = READ_ONCE(slab->freelist);
2415 old.counters = READ_ONCE(slab->counters);
2416 VM_BUG_ON(!old.frozen);
2417
2418 /* Determine target state of the slab */
2419 new.counters = old.counters;
2420 if (freelist_tail) {
2421 new.inuse -= free_delta;
2422 set_freepointer(s, freelist_tail, old.freelist);
2423 new.freelist = freelist;
2424 } else
2425 new.freelist = old.freelist;
2426
2427 new.frozen = 0;
2428
2429 if (!new.inuse && n->nr_partial >= s->min_partial) {
2430 mode = M_FREE;
2431 } else if (new.freelist) {
2432 mode = M_PARTIAL;
2433 /*
2434 * Taking the spinlock removes the possibility that
2435 * acquire_slab() will see a slab that is frozen
2436 */
2437 spin_lock_irqsave(&n->list_lock, flags);
2438 } else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
2439 mode = M_FULL;
2440 /*
2441 * This also ensures that the scanning of full
2442 * slabs from diagnostic functions will not see
2443 * any frozen slabs.
2444 */ 2445 spin_lock_irqsave(&n->list_lock, flags); 2446 } else { 2447 mode = M_FULL_NOLIST; 2448 } 2449 2450 2451 if (!cmpxchg_double_slab(s, slab, 2452 old.freelist, old.counters, 2453 new.freelist, new.counters, 2454 "unfreezing slab")) { 2455 if (mode == M_PARTIAL || mode == M_FULL) 2456 spin_unlock_irqrestore(&n->list_lock, flags); 2457 goto redo; 2458 } 2459 2460 2461 if (mode == M_PARTIAL) { 2462 add_partial(n, slab, tail); 2463 spin_unlock_irqrestore(&n->list_lock, flags); 2464 stat(s, tail); 2465 } else if (mode == M_FREE) { 2466 stat(s, DEACTIVATE_EMPTY); 2467 discard_slab(s, slab); 2468 stat(s, FREE_SLAB); 2469 } else if (mode == M_FULL) { 2470 add_full(s, n, slab); 2471 spin_unlock_irqrestore(&n->list_lock, flags); 2472 stat(s, DEACTIVATE_FULL); 2473 } else if (mode == M_FULL_NOLIST) { 2474 stat(s, DEACTIVATE_FULL); 2475 } 2476 } 2477 2478 #ifdef CONFIG_SLUB_CPU_PARTIAL 2479 static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) 2480 { 2481 struct kmem_cache_node *n = NULL, *n2 = NULL; 2482 struct slab *slab, *slab_to_discard = NULL; 2483 unsigned long flags = 0; 2484 2485 while (partial_slab) { 2486 struct slab new; 2487 struct slab old; 2488 2489 slab = partial_slab; 2490 partial_slab = slab->next; 2491 2492 n2 = get_node(s, slab_nid(slab)); 2493 if (n != n2) { 2494 if (n) 2495 spin_unlock_irqrestore(&n->list_lock, flags); 2496 2497 n = n2; 2498 spin_lock_irqsave(&n->list_lock, flags); 2499 } 2500 2501 do { 2502 2503 old.freelist = slab->freelist; 2504 old.counters = slab->counters; 2505 VM_BUG_ON(!old.frozen); 2506 2507 new.counters = old.counters; 2508 new.freelist = old.freelist; 2509 2510 new.frozen = 0; 2511 2512 } while (!__cmpxchg_double_slab(s, slab, 2513 old.freelist, old.counters, 2514 new.freelist, new.counters, 2515 "unfreezing slab")); 2516 2517 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2518 slab->next = slab_to_discard; 2519 slab_to_discard = slab; 2520 } else { 2521 add_partial(n, slab, DEACTIVATE_TO_TAIL); 2522 stat(s, FREE_ADD_PARTIAL); 2523 } 2524 } 2525 2526 if (n) 2527 spin_unlock_irqrestore(&n->list_lock, flags); 2528 2529 while (slab_to_discard) { 2530 slab = slab_to_discard; 2531 slab_to_discard = slab_to_discard->next; 2532 2533 stat(s, DEACTIVATE_EMPTY); 2534 discard_slab(s, slab); 2535 stat(s, FREE_SLAB); 2536 } 2537 } 2538 2539 /* 2540 * Unfreeze all the cpu partial slabs. 2541 */ 2542 static void unfreeze_partials(struct kmem_cache *s) 2543 { 2544 struct slab *partial_slab; 2545 unsigned long flags; 2546 2547 local_lock_irqsave(&s->cpu_slab->lock, flags); 2548 partial_slab = this_cpu_read(s->cpu_slab->partial); 2549 this_cpu_write(s->cpu_slab->partial, NULL); 2550 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2551 2552 if (partial_slab) 2553 __unfreeze_partials(s, partial_slab); 2554 } 2555 2556 static void unfreeze_partials_cpu(struct kmem_cache *s, 2557 struct kmem_cache_cpu *c) 2558 { 2559 struct slab *partial_slab; 2560 2561 partial_slab = slub_percpu_partial(c); 2562 c->partial = NULL; 2563 2564 if (partial_slab) 2565 __unfreeze_partials(s, partial_slab); 2566 } 2567 2568 /* 2569 * Put a slab that was just frozen (in __slab_free|get_partial_node) into a 2570 * partial slab slot if available. 2571 * 2572 * If we did not find a slot then simply move all the partials to the 2573 * per node partial list. 
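 *
 * The per-cpu partial list is chained through slab->next, and the head
 * slab's ->slabs field caches the list length: pushing slab D onto an
 * existing chain C -> B -> A yields D -> C -> B -> A with D->slabs == 4.
 * Once the head's count reaches s->cpu_partial_slabs, a draining free
 * hands the whole old chain to __unfreeze_partials().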
2574 */ 2575 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 2576 { 2577 struct slab *oldslab; 2578 struct slab *slab_to_unfreeze = NULL; 2579 unsigned long flags; 2580 int slabs = 0; 2581 2582 local_lock_irqsave(&s->cpu_slab->lock, flags); 2583 2584 oldslab = this_cpu_read(s->cpu_slab->partial); 2585 2586 if (oldslab) { 2587 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 2588 /* 2589 * Partial array is full. Move the existing set to the 2590 * per node partial list. Postpone the actual unfreezing 2591 * outside of the critical section. 2592 */ 2593 slab_to_unfreeze = oldslab; 2594 oldslab = NULL; 2595 } else { 2596 slabs = oldslab->slabs; 2597 } 2598 } 2599 2600 slabs++; 2601 2602 slab->slabs = slabs; 2603 slab->next = oldslab; 2604 2605 this_cpu_write(s->cpu_slab->partial, slab); 2606 2607 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2608 2609 if (slab_to_unfreeze) { 2610 __unfreeze_partials(s, slab_to_unfreeze); 2611 stat(s, CPU_PARTIAL_DRAIN); 2612 } 2613 } 2614 2615 #else /* CONFIG_SLUB_CPU_PARTIAL */ 2616 2617 static inline void unfreeze_partials(struct kmem_cache *s) { } 2618 static inline void unfreeze_partials_cpu(struct kmem_cache *s, 2619 struct kmem_cache_cpu *c) { } 2620 2621 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2622 2623 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2624 { 2625 unsigned long flags; 2626 struct slab *slab; 2627 void *freelist; 2628 2629 local_lock_irqsave(&s->cpu_slab->lock, flags); 2630 2631 slab = c->slab; 2632 freelist = c->freelist; 2633 2634 c->slab = NULL; 2635 c->freelist = NULL; 2636 c->tid = next_tid(c->tid); 2637 2638 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2639 2640 if (slab) { 2641 deactivate_slab(s, slab, freelist); 2642 stat(s, CPUSLAB_FLUSH); 2643 } 2644 } 2645 2646 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2647 { 2648 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2649 void *freelist = c->freelist; 2650 struct slab *slab = c->slab; 2651 2652 c->slab = NULL; 2653 c->freelist = NULL; 2654 c->tid = next_tid(c->tid); 2655 2656 if (slab) { 2657 deactivate_slab(s, slab, freelist); 2658 stat(s, CPUSLAB_FLUSH); 2659 } 2660 2661 unfreeze_partials_cpu(s, c); 2662 } 2663 2664 struct slub_flush_work { 2665 struct work_struct work; 2666 struct kmem_cache *s; 2667 bool skip; 2668 }; 2669 2670 /* 2671 * Flush cpu slab. 2672 * 2673 * Called from CPU work handler with migration disabled. 
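 *
 * In effect, a full flush round trip is: flush_all() takes
 * cpus_read_lock(), flush_all_cpus_locked() queues this handler via
 * schedule_work_on() on every cpu where has_cpu_slab() is true, each
 * handler deactivates its cpu slab and unfreezes its percpu partials,
 * and the caller then waits on every queued work with flush_work().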
2674 */
2675 static void flush_cpu_slab(struct work_struct *w)
2676 {
2677 struct kmem_cache *s;
2678 struct kmem_cache_cpu *c;
2679 struct slub_flush_work *sfw;
2680
2681 sfw = container_of(w, struct slub_flush_work, work);
2682
2683 s = sfw->s;
2684 c = this_cpu_ptr(s->cpu_slab);
2685
2686 if (c->slab)
2687 flush_slab(s, c);
2688
2689 unfreeze_partials(s);
2690 }
2691
2692 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
2693 {
2694 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2695
2696 return c->slab || slub_percpu_partial(c);
2697 }
2698
2699 static DEFINE_MUTEX(flush_lock);
2700 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
2701
2702 static void flush_all_cpus_locked(struct kmem_cache *s)
2703 {
2704 struct slub_flush_work *sfw;
2705 unsigned int cpu;
2706
2707 lockdep_assert_cpus_held();
2708 mutex_lock(&flush_lock);
2709
2710 for_each_online_cpu(cpu) {
2711 sfw = &per_cpu(slub_flush, cpu);
2712 if (!has_cpu_slab(cpu, s)) {
2713 sfw->skip = true;
2714 continue;
2715 }
2716 INIT_WORK(&sfw->work, flush_cpu_slab);
2717 sfw->skip = false;
2718 sfw->s = s;
2719 schedule_work_on(cpu, &sfw->work);
2720 }
2721
2722 for_each_online_cpu(cpu) {
2723 sfw = &per_cpu(slub_flush, cpu);
2724 if (sfw->skip)
2725 continue;
2726 flush_work(&sfw->work);
2727 }
2728
2729 mutex_unlock(&flush_lock);
2730 }
2731
2732 static void flush_all(struct kmem_cache *s)
2733 {
2734 cpus_read_lock();
2735 flush_all_cpus_locked(s);
2736 cpus_read_unlock();
2737 }
2738
2739 /*
2740 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2741 * necessary.
2742 */
2743 static int slub_cpu_dead(unsigned int cpu)
2744 {
2745 struct kmem_cache *s;
2746
2747 mutex_lock(&slab_mutex);
2748 list_for_each_entry(s, &slab_caches, list)
2749 __flush_cpu_slab(s, cpu);
2750 mutex_unlock(&slab_mutex);
2751 return 0;
2752 }
2753
2754 /*
2755 * Check if the objects in a per cpu structure fit numa
2756 * locality expectations.
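 *
 * For example (illustrative): a request with node == NUMA_NO_NODE
 * always matches, while kmem_cache_alloc_node(s, gfp, 1) against a cpu
 * slab residing on node 0 fails the check and makes the slowpath
 * deactivate the cpu slab and look for node 1 memory instead.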
2757 */ 2758 static inline int node_match(struct slab *slab, int node) 2759 { 2760 #ifdef CONFIG_NUMA 2761 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 2762 return 0; 2763 #endif 2764 return 1; 2765 } 2766 2767 #ifdef CONFIG_SLUB_DEBUG 2768 static int count_free(struct slab *slab) 2769 { 2770 return slab->objects - slab->inuse; 2771 } 2772 2773 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2774 { 2775 return atomic_long_read(&n->total_objects); 2776 } 2777 #endif /* CONFIG_SLUB_DEBUG */ 2778 2779 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2780 static unsigned long count_partial(struct kmem_cache_node *n, 2781 int (*get_count)(struct slab *)) 2782 { 2783 unsigned long flags; 2784 unsigned long x = 0; 2785 struct slab *slab; 2786 2787 spin_lock_irqsave(&n->list_lock, flags); 2788 list_for_each_entry(slab, &n->partial, slab_list) 2789 x += get_count(slab); 2790 spin_unlock_irqrestore(&n->list_lock, flags); 2791 return x; 2792 } 2793 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2794 2795 static noinline void 2796 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2797 { 2798 #ifdef CONFIG_SLUB_DEBUG 2799 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2800 DEFAULT_RATELIMIT_BURST); 2801 int node; 2802 struct kmem_cache_node *n; 2803 2804 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2805 return; 2806 2807 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2808 nid, gfpflags, &gfpflags); 2809 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2810 s->name, s->object_size, s->size, oo_order(s->oo), 2811 oo_order(s->min)); 2812 2813 if (oo_order(s->min) > get_order(s->object_size)) 2814 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2815 s->name); 2816 2817 for_each_kmem_cache_node(s, node, n) { 2818 unsigned long nr_slabs; 2819 unsigned long nr_objs; 2820 unsigned long nr_free; 2821 2822 nr_free = count_partial(n, count_free); 2823 nr_slabs = node_nr_slabs(n); 2824 nr_objs = node_nr_objs(n); 2825 2826 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2827 node, nr_slabs, nr_objs, nr_free); 2828 } 2829 #endif 2830 } 2831 2832 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) 2833 { 2834 if (unlikely(slab_test_pfmemalloc(slab))) 2835 return gfp_pfmemalloc_allowed(gfpflags); 2836 2837 return true; 2838 } 2839 2840 /* 2841 * Check the slab->freelist and either transfer the freelist to the 2842 * per cpu freelist or deactivate the slab. 2843 * 2844 * The slab is still frozen if the return value is not NULL. 2845 * 2846 * If this function returns NULL then the slab has been unfrozen. 2847 */ 2848 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 2849 { 2850 struct slab new; 2851 unsigned long counters; 2852 void *freelist; 2853 2854 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 2855 2856 do { 2857 freelist = slab->freelist; 2858 counters = slab->counters; 2859 2860 new.counters = counters; 2861 VM_BUG_ON(!new.frozen); 2862 2863 new.inuse = slab->objects; 2864 new.frozen = freelist != NULL; 2865 2866 } while (!__cmpxchg_double_slab(s, slab, 2867 freelist, counters, 2868 NULL, new.counters, 2869 "get_freelist")); 2870 2871 return freelist; 2872 } 2873 2874 /* 2875 * Slow path. The lockless freelist is empty or we need to perform 2876 * debugging duties. 2877 * 2878 * Processing is still very fast if new objects have been freed to the 2879 * regular freelist. 
In that case we simply take over the regular freelist 2880 * as the lockless freelist and zap the regular freelist. 2881 * 2882 * If that is not working then we fall back to the partial lists. We take the 2883 * first element of the freelist as the object to allocate now and move the 2884 * rest of the freelist to the lockless freelist. 2885 * 2886 * And if we were unable to get a new slab from the partial slab lists then 2887 * we need to allocate a new slab. This is the slowest path since it involves 2888 * a call to the page allocator and the setup of a new slab. 2889 * 2890 * Version of __slab_alloc to use when we know that preemption is 2891 * already disabled (which is the case for bulk allocation). 2892 */ 2893 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2894 unsigned long addr, struct kmem_cache_cpu *c) 2895 { 2896 void *freelist; 2897 struct slab *slab; 2898 unsigned long flags; 2899 2900 stat(s, ALLOC_SLOWPATH); 2901 2902 reread_slab: 2903 2904 slab = READ_ONCE(c->slab); 2905 if (!slab) { 2906 /* 2907 * if the node is not online or has no normal memory, just 2908 * ignore the node constraint 2909 */ 2910 if (unlikely(node != NUMA_NO_NODE && 2911 !node_isset(node, slab_nodes))) 2912 node = NUMA_NO_NODE; 2913 goto new_slab; 2914 } 2915 redo: 2916 2917 if (unlikely(!node_match(slab, node))) { 2918 /* 2919 * same as above but node_match() being false already 2920 * implies node != NUMA_NO_NODE 2921 */ 2922 if (!node_isset(node, slab_nodes)) { 2923 node = NUMA_NO_NODE; 2924 } else { 2925 stat(s, ALLOC_NODE_MISMATCH); 2926 goto deactivate_slab; 2927 } 2928 } 2929 2930 /* 2931 * By rights, we should be searching for a slab page that was 2932 * PFMEMALLOC but right now, we are losing the pfmemalloc 2933 * information when the page leaves the per-cpu allocator 2934 */ 2935 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 2936 goto deactivate_slab; 2937 2938 /* must check again c->slab in case we got preempted and it changed */ 2939 local_lock_irqsave(&s->cpu_slab->lock, flags); 2940 if (unlikely(slab != c->slab)) { 2941 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2942 goto reread_slab; 2943 } 2944 freelist = c->freelist; 2945 if (freelist) 2946 goto load_freelist; 2947 2948 freelist = get_freelist(s, slab); 2949 2950 if (!freelist) { 2951 c->slab = NULL; 2952 c->tid = next_tid(c->tid); 2953 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2954 stat(s, DEACTIVATE_BYPASS); 2955 goto new_slab; 2956 } 2957 2958 stat(s, ALLOC_REFILL); 2959 2960 load_freelist: 2961 2962 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 2963 2964 /* 2965 * freelist is pointing to the list of objects to be used. 2966 * slab is pointing to the slab from which the objects are obtained. 2967 * That slab must be frozen for per cpu allocations to work. 
2968 */ 2969 VM_BUG_ON(!c->slab->frozen); 2970 c->freelist = get_freepointer(s, freelist); 2971 c->tid = next_tid(c->tid); 2972 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2973 return freelist; 2974 2975 deactivate_slab: 2976 2977 local_lock_irqsave(&s->cpu_slab->lock, flags); 2978 if (slab != c->slab) { 2979 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2980 goto reread_slab; 2981 } 2982 freelist = c->freelist; 2983 c->slab = NULL; 2984 c->freelist = NULL; 2985 c->tid = next_tid(c->tid); 2986 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2987 deactivate_slab(s, slab, freelist); 2988 2989 new_slab: 2990 2991 if (slub_percpu_partial(c)) { 2992 local_lock_irqsave(&s->cpu_slab->lock, flags); 2993 if (unlikely(c->slab)) { 2994 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2995 goto reread_slab; 2996 } 2997 if (unlikely(!slub_percpu_partial(c))) { 2998 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2999 /* we were preempted and partial list got empty */ 3000 goto new_objects; 3001 } 3002 3003 slab = c->slab = slub_percpu_partial(c); 3004 slub_set_percpu_partial(c, slab); 3005 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3006 stat(s, CPU_PARTIAL_ALLOC); 3007 goto redo; 3008 } 3009 3010 new_objects: 3011 3012 freelist = get_partial(s, gfpflags, node, &slab); 3013 if (freelist) 3014 goto check_new_slab; 3015 3016 slub_put_cpu_ptr(s->cpu_slab); 3017 slab = new_slab(s, gfpflags, node); 3018 c = slub_get_cpu_ptr(s->cpu_slab); 3019 3020 if (unlikely(!slab)) { 3021 slab_out_of_memory(s, gfpflags, node); 3022 return NULL; 3023 } 3024 3025 /* 3026 * No other reference to the slab yet so we can 3027 * muck around with it freely without cmpxchg 3028 */ 3029 freelist = slab->freelist; 3030 slab->freelist = NULL; 3031 3032 stat(s, ALLOC_SLAB); 3033 3034 check_new_slab: 3035 3036 if (kmem_cache_debug(s)) { 3037 if (!alloc_debug_processing(s, slab, freelist, addr)) { 3038 /* Slab failed checks. Next slab needed */ 3039 goto new_slab; 3040 } else { 3041 /* 3042 * For debug case, we don't load freelist so that all 3043 * allocations go through alloc_debug_processing() 3044 */ 3045 goto return_single; 3046 } 3047 } 3048 3049 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3050 /* 3051 * For !pfmemalloc_match() case we don't load freelist so that 3052 * we don't make further mismatched allocations easier. 3053 */ 3054 goto return_single; 3055 3056 retry_load_slab: 3057 3058 local_lock_irqsave(&s->cpu_slab->lock, flags); 3059 if (unlikely(c->slab)) { 3060 void *flush_freelist = c->freelist; 3061 struct slab *flush_slab = c->slab; 3062 3063 c->slab = NULL; 3064 c->freelist = NULL; 3065 c->tid = next_tid(c->tid); 3066 3067 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3068 3069 deactivate_slab(s, flush_slab, flush_freelist); 3070 3071 stat(s, CPUSLAB_FLUSH); 3072 3073 goto retry_load_slab; 3074 } 3075 c->slab = slab; 3076 3077 goto load_freelist; 3078 3079 return_single: 3080 3081 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3082 return freelist; 3083 } 3084 3085 /* 3086 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3087 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3088 * pointer. 3089 */ 3090 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3091 unsigned long addr, struct kmem_cache_cpu *c) 3092 { 3093 void *p; 3094 3095 #ifdef CONFIG_PREEMPT_COUNT 3096 /* 3097 * We may have been preempted and rescheduled on a different 3098 * cpu before disabling preemption. 
Need to reload the cpu area
3099 * pointer.
3100 */
3101 c = slub_get_cpu_ptr(s->cpu_slab);
3102 #endif
3103
3104 p = ___slab_alloc(s, gfpflags, node, addr, c);
3105 #ifdef CONFIG_PREEMPT_COUNT
3106 slub_put_cpu_ptr(s->cpu_slab);
3107 #endif
3108 return p;
3109 }
3110
3111 /*
3112 * If the object has been wiped upon free, make sure it's fully initialized by
3113 * zeroing out freelist pointer.
3114 */
3115 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
3116 void *obj)
3117 {
3118 if (unlikely(slab_want_init_on_free(s)) && obj)
3119 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
3120 0, sizeof(void *));
3121 }
3122
3123 /*
3124 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
3125 * have the fastpath folded into their functions. So no function call
3126 * overhead for requests that can be satisfied on the fastpath.
3127 *
3128 * The fastpath works by first checking if the lockless freelist can be used.
3129 * If not then __slab_alloc is called for slow processing.
3130 *
3131 * Otherwise we can simply pick the next object from the lockless free list.
3132 */
3133 static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
3134 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3135 {
3136 void *object;
3137 struct kmem_cache_cpu *c;
3138 struct slab *slab;
3139 unsigned long tid;
3140 struct obj_cgroup *objcg = NULL;
3141 bool init = false;
3142
3143 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
3144 if (!s)
3145 return NULL;
3146
3147 object = kfence_alloc(s, orig_size, gfpflags);
3148 if (unlikely(object))
3149 goto out;
3150
3151 redo:
3152 /*
3153 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3154 * enabled. We may switch back and forth between cpus while
3155 * reading from one cpu area. That does not matter as long
3156 * as we end up on the original cpu again when doing the cmpxchg.
3157 *
3158 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3159 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3160 * the tid. If we are preempted and switched to another cpu between the
3161 * two reads, it's OK as the two are still associated with the same cpu
3162 * and cmpxchg later will validate the cpu.
3163 */
3164 c = raw_cpu_ptr(s->cpu_slab);
3165 tid = READ_ONCE(c->tid);
3166
3167 /*
3168 * The irqless object alloc/free algorithm used here depends on the
3169 * sequence of fetching cpu_slab's data. tid should be fetched before
3170 * anything else on c to guarantee that the object and slab associated
3171 * with the previous tid won't be used with the current tid. If we fetch
3172 * tid first, the object and slab could be ones associated with the next
3173 * tid and our alloc/free request will fail. In this case, we will retry. So, no problem.
3174 */
3175 barrier();
3176
3177 /*
3178 * The transaction ids are globally unique per cpu and per operation on
3179 * a per cpu queue. Thus they guarantee that the cmpxchg_double
3180 * occurs on the right processor and that there was no operation on the
3181 * linked list in between.
3182 */
3183
3184 object = c->freelist;
3185 slab = c->slab;
3186 /*
3187 * We cannot use the lockless fastpath on PREEMPT_RT because if a
3188 * slowpath has taken the local_lock_irqsave(), it is not protected
3189 * against a fast path operation in an irq handler. So we need to take
3190 * the slow path which uses local_lock. It is still relatively fast if
3191 * there is a suitable cpu freelist.
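 *
 * Concretely (assuming CONFIG_NR_CPUS == 64, so TID_STEP is 64): cpu 3
 * issues tids 3, 67, 131, ...; tid % 64 recovers the cpu and tid / 64
 * the event count. The cmpxchg_double on (freelist, tid) below thus
 * fails both when we were migrated to another cpu and when another
 * alloc or free slipped in on this cpu between the reads above.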
3192 */ 3193 if (IS_ENABLED(CONFIG_PREEMPT_RT) || 3194 unlikely(!object || !slab || !node_match(slab, node))) { 3195 object = __slab_alloc(s, gfpflags, node, addr, c); 3196 } else { 3197 void *next_object = get_freepointer_safe(s, object); 3198 3199 /* 3200 * The cmpxchg will only match if there was no additional 3201 * operation and if we are on the right processor. 3202 * 3203 * The cmpxchg does the following atomically (without lock 3204 * semantics!) 3205 * 1. Relocate first pointer to the current per cpu area. 3206 * 2. Verify that tid and freelist have not been changed 3207 * 3. If they were not changed replace tid and freelist 3208 * 3209 * Since this is without lock semantics the protection is only 3210 * against code executing on this cpu *not* from access by 3211 * other cpus. 3212 */ 3213 if (unlikely(!this_cpu_cmpxchg_double( 3214 s->cpu_slab->freelist, s->cpu_slab->tid, 3215 object, tid, 3216 next_object, next_tid(tid)))) { 3217 3218 note_cmpxchg_failure("slab_alloc", s, tid); 3219 goto redo; 3220 } 3221 prefetch_freepointer(s, next_object); 3222 stat(s, ALLOC_FASTPATH); 3223 } 3224 3225 maybe_wipe_obj_freeptr(s, object); 3226 init = slab_want_init_on_alloc(gfpflags, s); 3227 3228 out: 3229 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); 3230 3231 return object; 3232 } 3233 3234 static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru, 3235 gfp_t gfpflags, unsigned long addr, size_t orig_size) 3236 { 3237 return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size); 3238 } 3239 3240 static __always_inline 3241 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 3242 gfp_t gfpflags) 3243 { 3244 void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size); 3245 3246 trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size, 3247 s->size, gfpflags); 3248 3249 return ret; 3250 } 3251 3252 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 3253 { 3254 return __kmem_cache_alloc_lru(s, NULL, gfpflags); 3255 } 3256 EXPORT_SYMBOL(kmem_cache_alloc); 3257 3258 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, 3259 gfp_t gfpflags) 3260 { 3261 return __kmem_cache_alloc_lru(s, lru, gfpflags); 3262 } 3263 EXPORT_SYMBOL(kmem_cache_alloc_lru); 3264 3265 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, 3266 int node, size_t orig_size, 3267 unsigned long caller) 3268 { 3269 return slab_alloc_node(s, NULL, gfpflags, node, 3270 caller, orig_size); 3271 } 3272 3273 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3274 { 3275 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 3276 3277 trace_kmem_cache_alloc_node(_RET_IP_, ret, s, 3278 s->object_size, s->size, gfpflags, node); 3279 3280 return ret; 3281 } 3282 EXPORT_SYMBOL(kmem_cache_alloc_node); 3283 3284 /* 3285 * Slow path handling. This may still be called frequently since objects 3286 * have a longer lifetime than the cpu slabs in most processing loads. 3287 * 3288 * So we still attempt to reduce cache line usage. Just take the slab 3289 * lock and free the item. If there is no additional partial slab 3290 * handling required then we can return immediately. 
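 *
 * The core of the function below is a speculative transition: snapshot
 * slab->freelist and slab->counters, compute the new state, and take
 * n->list_lock only when the slab may have to move between lists. If
 * the cmpxchg_double over (freelist, counters) loses a race, the lock
 * is dropped and the snapshot is retried.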
3291 */
3292 static void __slab_free(struct kmem_cache *s, struct slab *slab,
3293 void *head, void *tail, int cnt,
3294 unsigned long addr)
3295
3296 {
3297 void *prior;
3298 int was_frozen;
3299 struct slab new;
3300 unsigned long counters;
3301 struct kmem_cache_node *n = NULL;
3302 unsigned long flags;
3303
3304 stat(s, FREE_SLOWPATH);
3305
3306 if (kfence_free(head))
3307 return;
3308
3309 if (kmem_cache_debug(s) &&
3310 !free_debug_processing(s, slab, head, tail, cnt, addr))
3311 return;
3312
3313 do {
3314 if (unlikely(n)) {
3315 spin_unlock_irqrestore(&n->list_lock, flags);
3316 n = NULL;
3317 }
3318 prior = slab->freelist;
3319 counters = slab->counters;
3320 set_freepointer(s, tail, prior);
3321 new.counters = counters;
3322 was_frozen = new.frozen;
3323 new.inuse -= cnt;
3324 if ((!new.inuse || !prior) && !was_frozen) {
3325
3326 if (kmem_cache_has_cpu_partial(s) && !prior) {
3327
3328 /*
3329 * Slab was on no list before and will be
3330 * partially empty.
3331 * We can defer the list move and instead
3332 * freeze it.
3333 */
3334 new.frozen = 1;
3335
3336 } else { /* Needs to be taken off a list */
3337
3338 n = get_node(s, slab_nid(slab));
3339 /*
3340 * Speculatively acquire the list_lock.
3341 * If the cmpxchg does not succeed then we may
3342 * drop the list_lock without any processing.
3343 *
3344 * Otherwise the list_lock will synchronize with
3345 * other processors updating the list of slabs.
3346 */
3347 spin_lock_irqsave(&n->list_lock, flags);
3348
3349 }
3350 }
3351
3352 } while (!cmpxchg_double_slab(s, slab,
3353 prior, counters,
3354 head, new.counters,
3355 "__slab_free"));
3356
3357 if (likely(!n)) {
3358
3359 if (likely(was_frozen)) {
3360 /*
3361 * The list lock was not taken therefore no list
3362 * activity can be necessary.
3363 */
3364 stat(s, FREE_FROZEN);
3365 } else if (new.frozen) {
3366 /*
3367 * If we just froze the slab then put it onto the
3368 * per cpu partial list.
3369 */
3370 put_cpu_partial(s, slab, 1);
3371 stat(s, CPU_PARTIAL_FREE);
3372 }
3373
3374 return;
3375 }
3376
3377 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
3378 goto slab_empty;
3379
3380 /*
3381 * Objects left in the slab. If it was not on the partial list before
3382 * then add it.
3383 */
3384 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
3385 remove_full(s, n, slab);
3386 add_partial(n, slab, DEACTIVATE_TO_TAIL);
3387 stat(s, FREE_ADD_PARTIAL);
3388 }
3389 spin_unlock_irqrestore(&n->list_lock, flags);
3390 return;
3391
3392 slab_empty:
3393 if (prior) {
3394 /*
3395 * Slab on the partial list.
3396 */
3397 remove_partial(n, slab);
3398 stat(s, FREE_REMOVE_PARTIAL);
3399 } else {
3400 /* Slab must be on the full list */
3401 remove_full(s, n, slab);
3402 }
3403
3404 spin_unlock_irqrestore(&n->list_lock, flags);
3405 stat(s, FREE_SLAB);
3406 discard_slab(s, slab);
3407 }
3408
3409 /*
3410 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3411 * can perform fastpath freeing without additional function calls.
3412 *
3413 * The fastpath is only possible if we are freeing to the current cpu slab
3414 * of this processor. This is typically the case if we have just allocated
3415 * the item before.
3416 *
3417 * If fastpath is not possible then fall back to __slab_free where we deal
3418 * with all sorts of special processing.
3419 *
3420 * Bulk free of a freelist with several objects (all pointing to the
3421 * same slab) is possible by specifying head and tail pointers, plus the
3422 * object count (cnt).
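 *
 * For example (illustrative): freeing a pre-linked chain A -> B -> C
 * uses head == A, tail == C and cnt == 3; a single cmpxchg_double then
 * splices all three objects onto the cpu freelist at once.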
A bulk free is indicated by the tail pointer being set.
3423 */
3424 static __always_inline void do_slab_free(struct kmem_cache *s,
3425 struct slab *slab, void *head, void *tail,
3426 int cnt, unsigned long addr)
3427 {
3428 void *tail_obj = tail ? : head;
3429 struct kmem_cache_cpu *c;
3430 unsigned long tid;
3431
3432 redo:
3433 /*
3434 * Determine the current cpu's per cpu slab.
3435 * The cpu may change afterward. However that does not matter since
3436 * data is retrieved via this pointer. If we are on the same cpu
3437 * during the cmpxchg then the free will succeed.
3438 */
3439 c = raw_cpu_ptr(s->cpu_slab);
3440 tid = READ_ONCE(c->tid);
3441
3442 /* Same as the comment on barrier() in slab_alloc_node() */
3443 barrier();
3444
3445 if (likely(slab == c->slab)) {
3446 #ifndef CONFIG_PREEMPT_RT
3447 void **freelist = READ_ONCE(c->freelist);
3448
3449 set_freepointer(s, tail_obj, freelist);
3450
3451 if (unlikely(!this_cpu_cmpxchg_double(
3452 s->cpu_slab->freelist, s->cpu_slab->tid,
3453 freelist, tid,
3454 head, next_tid(tid)))) {
3455
3456 note_cmpxchg_failure("slab_free", s, tid);
3457 goto redo;
3458 }
3459 #else /* CONFIG_PREEMPT_RT */
3460 /*
3461 * We cannot use the lockless fastpath on PREEMPT_RT because if
3462 * a slowpath has taken the local_lock_irqsave(), it is not
3463 * protected against a fast path operation in an irq handler. So
3464 * we need to take the local_lock. We shouldn't simply defer to
3465 * __slab_free() as that wouldn't use the cpu freelist at all.
3466 */
3467 void **freelist;
3468
3469 local_lock(&s->cpu_slab->lock);
3470 c = this_cpu_ptr(s->cpu_slab);
3471 if (unlikely(slab != c->slab)) {
3472 local_unlock(&s->cpu_slab->lock);
3473 goto redo;
3474 }
3475 tid = c->tid;
3476 freelist = c->freelist;
3477
3478 set_freepointer(s, tail_obj, freelist);
3479 c->freelist = head;
3480 c->tid = next_tid(tid);
3481
3482 local_unlock(&s->cpu_slab->lock);
3483 #endif
3484 stat(s, FREE_FASTPATH);
3485 } else
3486 __slab_free(s, slab, head, tail_obj, cnt, addr);
3487
3488 }
3489
3490 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
3491 void *head, void *tail, void **p, int cnt,
3492 unsigned long addr)
3493 {
3494 memcg_slab_free_hook(s, slab, p, cnt);
3495 /*
3496 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3497 * to remove objects whose reuse must be delayed.
3498 */
3499 if (slab_free_freelist_hook(s, &head, &tail, &cnt))
3500 do_slab_free(s, slab, head, tail, cnt, addr);
3501 }
3502
3503 #ifdef CONFIG_KASAN_GENERIC
3504 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3505 {
3506 do_slab_free(cache, virt_to_slab(x), x, NULL, 1, addr);
3507 }
3508 #endif
3509
3510 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
3511 {
3512 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
3513 }
3514
3515 void kmem_cache_free(struct kmem_cache *s, void *x)
3516 {
3517 s = cache_from_obj(s, x);
3518 if (!s)
3519 return;
3520 trace_kmem_cache_free(_RET_IP_, x, s->name);
3521 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
3522 }
3523 EXPORT_SYMBOL(kmem_cache_free);
3524
3525 struct detached_freelist {
3526 struct slab *slab;
3527 void *tail;
3528 void *freelist;
3529 int cnt;
3530 struct kmem_cache *s;
3531 };
3532
3533 /*
3534 * This function progressively scans the array with free objects (with
3535 * a limited look ahead) and extracts objects belonging to the same
3536 * slab. It builds a detached freelist directly within the given
3537 * slab/objects.
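 *
 * Worked example (illustrative): given p[] = {A1, B1, A2, A3} with the
 * A objects in one slab, a single pass detaches the list A1 -> A2 -> A3
 * (cnt == 3), swaps B1 to the front of the remaining array and returns
 * 1, so the next kmem_cache_free_bulk() iteration handles B1.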
This can happen without any need for
3538 * synchronization, because the objects are owned by the running process.
3539 * The freelist is built up as a singly linked list in the objects.
3540 * The idea is that this detached freelist can then be bulk
3541 * transferred to the real freelist(s), requiring only a single
3542 * synchronization primitive. Look ahead in the array is limited for
3543 * performance reasons.
3544 */
3545 static inline
3546 int build_detached_freelist(struct kmem_cache *s, size_t size,
3547 void **p, struct detached_freelist *df)
3548 {
3549 int lookahead = 3;
3550 void *object;
3551 struct folio *folio;
3552 size_t same;
3553
3554 object = p[--size];
3555 folio = virt_to_folio(object);
3556 if (!s) {
3557 /* Handle kmalloc'ed objects */
3558 if (unlikely(!folio_test_slab(folio))) {
3559 free_large_kmalloc(folio, object);
3560 df->slab = NULL;
3561 return size;
3562 }
3563 /* Derive kmem_cache from object */
3564 df->slab = folio_slab(folio);
3565 df->s = df->slab->slab_cache;
3566 } else {
3567 df->slab = folio_slab(folio);
3568 df->s = cache_from_obj(s, object); /* Support for memcg */
3569 }
3570
3571 /* Start new detached freelist */
3572 df->tail = object;
3573 df->freelist = object;
3574 df->cnt = 1;
3575
3576 if (is_kfence_address(object))
3577 return size;
3578
3579 set_freepointer(df->s, object, NULL);
3580
3581 same = size;
3582 while (size) {
3583 object = p[--size];
3584 /* df->slab is always set at this point */
3585 if (df->slab == virt_to_slab(object)) {
3586 /* Opportunistically build the freelist */
3587 set_freepointer(df->s, object, df->freelist);
3588 df->freelist = object;
3589 df->cnt++;
3590 same--;
3591 if (size != same)
3592 swap(p[size], p[same]);
3593 continue;
3594 }
3595
3596 /* Limit look ahead search */
3597 if (!--lookahead)
3598 break;
3599 }
3600
3601 return same;
3602 }
3603
3604 /* Note that interrupts must be enabled when calling this function. */
3605 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3606 {
3607 if (!size)
3608 return;
3609
3610 do {
3611 struct detached_freelist df;
3612
3613 size = build_detached_freelist(s, size, p, &df);
3614 if (!df.slab)
3615 continue;
3616
3617 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
3618 _RET_IP_);
3619 } while (likely(size));
3620 }
3621 EXPORT_SYMBOL(kmem_cache_free_bulk);
3622
3623 /* Note that interrupts must be enabled when calling this function. */
3624 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3625 void **p)
3626 {
3627 struct kmem_cache_cpu *c;
3628 int i;
3629 struct obj_cgroup *objcg = NULL;
3630
3631 /* memcg and kmem_cache debug support */
3632 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
3633 if (unlikely(!s))
3634 return false;
3635 /*
3636 * Drain objects in the per cpu slab, while disabling local
3637 * IRQs, which protects against PREEMPT and interrupt
3638 * handlers invoking the normal fastpath.
3639 */
3640 c = slub_get_cpu_ptr(s->cpu_slab);
3641 local_lock_irq(&s->cpu_slab->lock);
3642
3643 for (i = 0; i < size; i++) {
3644 void *object = kfence_alloc(s, s->object_size, flags);
3645
3646 if (unlikely(object)) {
3647 p[i] = object;
3648 continue;
3649 }
3650
3651 object = c->freelist;
3652 if (unlikely(!object)) {
3653 /*
3654 * We may have removed an object from c->freelist using
3655 * the fastpath in the previous iteration; in that case,
3656 * c->tid has not been bumped yet.
3657 * Since ___slab_alloc() may reenable interrupts while
3658 * allocating memory, we should bump c->tid now.
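 *
 * (One way to see the race: once ___slab_alloc() re-enables interrupts,
 * an irq handler's fastpath free on this cpu could otherwise pair the
 * stale tid with a freelist we have already been consuming, and its
 * cmpxchg could succeed against inconsistent state.)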
3659 */
3660 c->tid = next_tid(c->tid);
3661
3662 local_unlock_irq(&s->cpu_slab->lock);
3663
3664 /*
3665 * Invoking the slow path likely has the side effect
3666 * of re-populating the per CPU c->freelist
3667 */
3668 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3669 _RET_IP_, c);
3670 if (unlikely(!p[i]))
3671 goto error;
3672
3673 c = this_cpu_ptr(s->cpu_slab);
3674 maybe_wipe_obj_freeptr(s, p[i]);
3675
3676 local_lock_irq(&s->cpu_slab->lock);
3677
3678 continue; /* goto for-loop */
3679 }
3680 c->freelist = get_freepointer(s, object);
3681 p[i] = object;
3682 maybe_wipe_obj_freeptr(s, p[i]);
3683 }
3684 c->tid = next_tid(c->tid);
3685 local_unlock_irq(&s->cpu_slab->lock);
3686 slub_put_cpu_ptr(s->cpu_slab);
3687
3688 /*
3689 * memcg and kmem_cache debug support and memory initialization.
3690 * Done outside of the IRQ disabled fastpath loop.
3691 */
3692 slab_post_alloc_hook(s, objcg, flags, size, p,
3693 slab_want_init_on_alloc(flags, s));
3694 return i;
3695 error:
3696 slub_put_cpu_ptr(s->cpu_slab);
3697 slab_post_alloc_hook(s, objcg, flags, i, p, false);
3698 kmem_cache_free_bulk(s, i, p);
3699 return 0;
3700 }
3701 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3702
3703
3704 /*
3705 * Object placement in a slab is made very easy because we always start at
3706 * offset 0. If we tune the size of the object to the alignment then we can
3707 * get the required alignment by putting one properly sized object after
3708 * another.
3709 *
3710 * Notice that the allocation order determines the sizes of the per cpu
3711 * caches. Each processor always has one slab available for allocations.
3712 * Increasing the allocation order reduces the number of times that slabs
3713 * must be moved on and off the partial lists and is therefore a factor in
3714 * locking overhead.
3715 */
3716
3717 /*
3718 * Minimum / Maximum order of slab pages. This influences locking overhead
3719 * and slab fragmentation. A higher order reduces the number of partial slabs
3720 * and increases the number of allocations possible without having to
3721 * take the list_lock.
3722 */
3723 static unsigned int slub_min_order;
3724 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3725 static unsigned int slub_min_objects;
3726
3727 /*
3728 * Calculate the order of allocation given a slab object size.
3729 *
3730 * The order of allocation has significant impact on performance and other
3731 * system components. Generally order 0 allocations should be preferred since
3732 * order 0 does not cause fragmentation in the page allocator. Larger objects
3733 * can be problematic to put into order 0 slabs because there may be too much
3734 * unused space left. We go to a higher order if more than 1/16th of the slab
3735 * would be wasted.
3736 *
3737 * In order to reach satisfactory performance we must ensure that a minimum
3738 * number of objects is in one slab. Otherwise we may generate too much
3739 * activity on the partial lists which requires taking the list_lock. This is
3740 * less of a concern for large slabs, though, which are rarely used.
3741 *
3742 * slub_max_order specifies the order where we begin to stop considering the
3743 * number of objects in a slab as critical. If we reach slub_max_order then
3744 * we try to keep the page order as low as possible. So we accept more waste
3745 * of space in favor of a small page order.
3746 *
3747 * Higher order allocations also allow the placement of more objects in a
3748 * slab and thereby reduce object handling overhead.
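 *
 * Worked example of the leftover test (illustrative, 4 KiB pages): for
 * a 700-byte object, an order-0 slab leaves 4096 % 700 = 596 bytes
 * unused, more than 4096 / 16 = 256, so it is rejected at fraction 16;
 * an order-1 slab leaves 8192 % 700 = 492 bytes, within 8192 / 16 = 512,
 * so order 1 is accepted.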
If the user has
3749 * requested a higher minimum order then we start with that one instead of
3750 * the smallest order which will fit the object.
3751 */
3752 static inline unsigned int calc_slab_order(unsigned int size,
3753 unsigned int min_objects, unsigned int max_order,
3754 unsigned int fract_leftover)
3755 {
3756 unsigned int min_order = slub_min_order;
3757 unsigned int order;
3758
3759 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3760 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3761
3762 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3763 order <= max_order; order++) {
3764
3765 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3766 unsigned int rem;
3767
3768 rem = slab_size % size;
3769
3770 if (rem <= slab_size / fract_leftover)
3771 break;
3772 }
3773
3774 return order;
3775 }
3776
3777 static inline int calculate_order(unsigned int size)
3778 {
3779 unsigned int order;
3780 unsigned int min_objects;
3781 unsigned int max_objects;
3782 unsigned int nr_cpus;
3783
3784 /*
3785 * Attempt to find the best configuration for a slab. This
3786 * works by first attempting to generate a layout with
3787 * the best configuration and backing off gradually.
3788 *
3789 * First we increase the acceptable waste in a slab. Then
3790 * we reduce the minimum objects required in a slab.
3791 */
3792 min_objects = slub_min_objects;
3793 if (!min_objects) {
3794 /*
3795 * Some architectures will only update present cpus when
3796 * onlining them, so don't trust the number if it's just 1. But
3797 * we also don't want to use nr_cpu_ids always, as on some other
3798 * architectures, there can be many possible cpus, but never
3799 * onlined. Here we compromise between trying to avoid too high
3800 * order on systems that appear larger than they are, and too
3801 * low order on systems that appear smaller than they are.
3802 */
3803 nr_cpus = num_present_cpus();
3804 if (nr_cpus <= 1)
3805 nr_cpus = nr_cpu_ids;
3806 min_objects = 4 * (fls(nr_cpus) + 1);
3807 }
3808 max_objects = order_objects(slub_max_order, size);
3809 min_objects = min(min_objects, max_objects);
3810
3811 while (min_objects > 1) {
3812 unsigned int fraction;
3813
3814 fraction = 16;
3815 while (fraction >= 4) {
3816 order = calc_slab_order(size, min_objects,
3817 slub_max_order, fraction);
3818 if (order <= slub_max_order)
3819 return order;
3820 fraction /= 2;
3821 }
3822 min_objects--;
3823 }
3824
3825 /*
3826 * We were unable to place multiple objects in a slab. Now
3827 * let's see if we can place a single object there.
3828 */
3829 order = calc_slab_order(size, 1, slub_max_order, 1);
3830 if (order <= slub_max_order)
3831 return order;
3832
3833 /*
3834 * Doh this slab cannot be placed using slub_max_order.
3835 */ 3836 order = calc_slab_order(size, 1, MAX_ORDER, 1); 3837 if (order < MAX_ORDER) 3838 return order; 3839 return -ENOSYS; 3840 } 3841 3842 static void 3843 init_kmem_cache_node(struct kmem_cache_node *n) 3844 { 3845 n->nr_partial = 0; 3846 spin_lock_init(&n->list_lock); 3847 INIT_LIST_HEAD(&n->partial); 3848 #ifdef CONFIG_SLUB_DEBUG 3849 atomic_long_set(&n->nr_slabs, 0); 3850 atomic_long_set(&n->total_objects, 0); 3851 INIT_LIST_HEAD(&n->full); 3852 #endif 3853 } 3854 3855 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3856 { 3857 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3858 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3859 3860 /* 3861 * Must align to double word boundary for the double cmpxchg 3862 * instructions to work; see __pcpu_double_call_return_bool(). 3863 */ 3864 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3865 2 * sizeof(void *)); 3866 3867 if (!s->cpu_slab) 3868 return 0; 3869 3870 init_kmem_cache_cpus(s); 3871 3872 return 1; 3873 } 3874 3875 static struct kmem_cache *kmem_cache_node; 3876 3877 /* 3878 * No kmalloc_node yet so do it by hand. We know that this is the first 3879 * slab on the node for this slabcache. There are no concurrent accesses 3880 * possible. 3881 * 3882 * Note that this function only works on the kmem_cache_node 3883 * when allocating for the kmem_cache_node. This is used for bootstrapping 3884 * memory on a fresh node that has no slab structures yet. 3885 */ 3886 static void early_kmem_cache_node_alloc(int node) 3887 { 3888 struct slab *slab; 3889 struct kmem_cache_node *n; 3890 3891 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3892 3893 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3894 3895 BUG_ON(!slab); 3896 if (slab_nid(slab) != node) { 3897 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3898 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3899 } 3900 3901 n = slab->freelist; 3902 BUG_ON(!n); 3903 #ifdef CONFIG_SLUB_DEBUG 3904 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3905 init_tracking(kmem_cache_node, n); 3906 #endif 3907 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 3908 slab->freelist = get_freepointer(kmem_cache_node, n); 3909 slab->inuse = 1; 3910 slab->frozen = 0; 3911 kmem_cache_node->node[node] = n; 3912 init_kmem_cache_node(n); 3913 inc_slabs_node(kmem_cache_node, node, slab->objects); 3914 3915 /* 3916 * No locks need to be taken here as it has just been 3917 * initialized and there is no concurrent access. 
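 *
 * Hence the call below uses __add_partial() rather than add_partial(),
 * whose lockdep_assert_held() would complain about the untaken
 * n->list_lock.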
3918 */ 3919 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 3920 } 3921 3922 static void free_kmem_cache_nodes(struct kmem_cache *s) 3923 { 3924 int node; 3925 struct kmem_cache_node *n; 3926 3927 for_each_kmem_cache_node(s, node, n) { 3928 s->node[node] = NULL; 3929 kmem_cache_free(kmem_cache_node, n); 3930 } 3931 } 3932 3933 void __kmem_cache_release(struct kmem_cache *s) 3934 { 3935 cache_random_seq_destroy(s); 3936 free_percpu(s->cpu_slab); 3937 free_kmem_cache_nodes(s); 3938 } 3939 3940 static int init_kmem_cache_nodes(struct kmem_cache *s) 3941 { 3942 int node; 3943 3944 for_each_node_mask(node, slab_nodes) { 3945 struct kmem_cache_node *n; 3946 3947 if (slab_state == DOWN) { 3948 early_kmem_cache_node_alloc(node); 3949 continue; 3950 } 3951 n = kmem_cache_alloc_node(kmem_cache_node, 3952 GFP_KERNEL, node); 3953 3954 if (!n) { 3955 free_kmem_cache_nodes(s); 3956 return 0; 3957 } 3958 3959 init_kmem_cache_node(n); 3960 s->node[node] = n; 3961 } 3962 return 1; 3963 } 3964 3965 static void set_cpu_partial(struct kmem_cache *s) 3966 { 3967 #ifdef CONFIG_SLUB_CPU_PARTIAL 3968 unsigned int nr_objects; 3969 3970 /* 3971 * cpu_partial determined the maximum number of objects kept in the 3972 * per cpu partial lists of a processor. 3973 * 3974 * Per cpu partial lists mainly contain slabs that just have one 3975 * object freed. If they are used for allocation then they can be 3976 * filled up again with minimal effort. The slab will never hit the 3977 * per node partial lists and therefore no locking will be required. 3978 * 3979 * For backwards compatibility reasons, this is determined as number 3980 * of objects, even though we now limit maximum number of pages, see 3981 * slub_set_cpu_partial() 3982 */ 3983 if (!kmem_cache_has_cpu_partial(s)) 3984 nr_objects = 0; 3985 else if (s->size >= PAGE_SIZE) 3986 nr_objects = 6; 3987 else if (s->size >= 1024) 3988 nr_objects = 24; 3989 else if (s->size >= 256) 3990 nr_objects = 52; 3991 else 3992 nr_objects = 120; 3993 3994 slub_set_cpu_partial(s, nr_objects); 3995 #endif 3996 } 3997 3998 /* 3999 * calculate_sizes() determines the order and the distribution of data within 4000 * a slab object. 4001 */ 4002 static int calculate_sizes(struct kmem_cache *s) 4003 { 4004 slab_flags_t flags = s->flags; 4005 unsigned int size = s->object_size; 4006 unsigned int order; 4007 4008 /* 4009 * Round up object size to the next word boundary. We can only 4010 * place the free pointer at word boundaries and this determines 4011 * the possible location of the free pointer. 4012 */ 4013 size = ALIGN(size, sizeof(void *)); 4014 4015 #ifdef CONFIG_SLUB_DEBUG 4016 /* 4017 * Determine if we can poison the object itself. If the user of 4018 * the slab may touch the object after free or before allocation 4019 * then we should never poison the object itself. 4020 */ 4021 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 4022 !s->ctor) 4023 s->flags |= __OBJECT_POISON; 4024 else 4025 s->flags &= ~__OBJECT_POISON; 4026 4027 4028 /* 4029 * If we are Redzoning then check if there is some space between the 4030 * end of the object and the free pointer. If not then add an 4031 * additional word to have some bytes to store Redzone information. 4032 */ 4033 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 4034 size += sizeof(void *); 4035 #endif 4036 4037 /* 4038 * With that we have determined the number of bytes in actual use 4039 * by the object and redzoning. 
4040 */ 4041 s->inuse = size; 4042 4043 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 4044 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 4045 s->ctor) { 4046 /* 4047 * Relocate free pointer after the object if it is not 4048 * permitted to overwrite the first word of the object on 4049 * kmem_cache_free. 4050 * 4051 * This is the case if we do RCU, have a constructor or 4052 * destructor, are poisoning the objects, or are 4053 * redzoning an object smaller than sizeof(void *). 4054 * 4055 * The assumption that s->offset >= s->inuse means free 4056 * pointer is outside of the object is used in the 4057 * freeptr_outside_object() function. If that is no 4058 * longer true, the function needs to be modified. 4059 */ 4060 s->offset = size; 4061 size += sizeof(void *); 4062 } else { 4063 /* 4064 * Store freelist pointer near middle of object to keep 4065 * it away from the edges of the object to avoid small 4066 * sized over/underflows from neighboring allocations. 4067 */ 4068 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 4069 } 4070 4071 #ifdef CONFIG_SLUB_DEBUG 4072 if (flags & SLAB_STORE_USER) 4073 /* 4074 * Need to store information about allocs and frees after 4075 * the object. 4076 */ 4077 size += 2 * sizeof(struct track); 4078 #endif 4079 4080 kasan_cache_create(s, &size, &s->flags); 4081 #ifdef CONFIG_SLUB_DEBUG 4082 if (flags & SLAB_RED_ZONE) { 4083 /* 4084 * Add some empty padding so that we can catch 4085 * overwrites from earlier objects rather than let 4086 * tracking information or the free pointer be 4087 * corrupted if a user writes before the start 4088 * of the object. 4089 */ 4090 size += sizeof(void *); 4091 4092 s->red_left_pad = sizeof(void *); 4093 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 4094 size += s->red_left_pad; 4095 } 4096 #endif 4097 4098 /* 4099 * SLUB stores one object immediately after another beginning from 4100 * offset 0. In order to align the objects we have to simply size 4101 * each object to conform to the alignment. 4102 */ 4103 size = ALIGN(size, s->align); 4104 s->size = size; 4105 s->reciprocal_size = reciprocal_value(size); 4106 order = calculate_order(size); 4107 4108 if ((int)order < 0) 4109 return 0; 4110 4111 s->allocflags = 0; 4112 if (order) 4113 s->allocflags |= __GFP_COMP; 4114 4115 if (s->flags & SLAB_CACHE_DMA) 4116 s->allocflags |= GFP_DMA; 4117 4118 if (s->flags & SLAB_CACHE_DMA32) 4119 s->allocflags |= GFP_DMA32; 4120 4121 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4122 s->allocflags |= __GFP_RECLAIMABLE; 4123 4124 /* 4125 * Determine the number of objects per slab 4126 */ 4127 s->oo = oo_make(order, size); 4128 s->min = oo_make(get_order(size), size); 4129 4130 return !!oo_objects(s->oo); 4131 } 4132 4133 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 4134 { 4135 s->flags = kmem_cache_flags(s->size, flags, s->name); 4136 #ifdef CONFIG_SLAB_FREELIST_HARDENED 4137 s->random = get_random_long(); 4138 #endif 4139 4140 if (!calculate_sizes(s)) 4141 goto error; 4142 if (disable_higher_order_debug) { 4143 /* 4144 * Disable debugging flags that store metadata if the min slab 4145 * order increased. 
4146 */ 4147 if (get_order(s->size) > get_order(s->object_size)) { 4148 s->flags &= ~DEBUG_METADATA_FLAGS; 4149 s->offset = 0; 4150 if (!calculate_sizes(s)) 4151 goto error; 4152 } 4153 } 4154 4155 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 4156 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 4157 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 4158 /* Enable fast mode */ 4159 s->flags |= __CMPXCHG_DOUBLE; 4160 #endif 4161 4162 /* 4163 * The larger the object size is, the more slabs we want on the partial 4164 * list to avoid pounding the page allocator excessively. 4165 */ 4166 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 4167 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 4168 4169 set_cpu_partial(s); 4170 4171 #ifdef CONFIG_NUMA 4172 s->remote_node_defrag_ratio = 1000; 4173 #endif 4174 4175 /* Initialize the pre-computed randomized freelist if slab is up */ 4176 if (slab_state >= UP) { 4177 if (init_cache_random_seq(s)) 4178 goto error; 4179 } 4180 4181 if (!init_kmem_cache_nodes(s)) 4182 goto error; 4183 4184 if (alloc_kmem_cache_cpus(s)) 4185 return 0; 4186 4187 error: 4188 __kmem_cache_release(s); 4189 return -EINVAL; 4190 } 4191 4192 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 4193 const char *text) 4194 { 4195 #ifdef CONFIG_SLUB_DEBUG 4196 void *addr = slab_address(slab); 4197 unsigned long flags; 4198 unsigned long *map; 4199 void *p; 4200 4201 slab_err(s, slab, text, s->name); 4202 slab_lock(slab, &flags); 4203 4204 map = get_map(s, slab); 4205 for_each_object(p, s, addr, slab->objects) { 4206 4207 if (!test_bit(__obj_to_index(s, addr, p), map)) { 4208 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 4209 print_tracking(s, p); 4210 } 4211 } 4212 put_map(map); 4213 slab_unlock(slab, &flags); 4214 #endif 4215 } 4216 4217 /* 4218 * Attempt to free all partial slabs on a node. 4219 * This is called from __kmem_cache_shutdown(). We must take list_lock 4220 * because sysfs file might still access partial list after the shutdowning. 4221 */ 4222 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 4223 { 4224 LIST_HEAD(discard); 4225 struct slab *slab, *h; 4226 4227 BUG_ON(irqs_disabled()); 4228 spin_lock_irq(&n->list_lock); 4229 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 4230 if (!slab->inuse) { 4231 remove_partial(n, slab); 4232 list_add(&slab->slab_list, &discard); 4233 } else { 4234 list_slab_objects(s, slab, 4235 "Objects remaining in %s on __kmem_cache_shutdown()"); 4236 } 4237 } 4238 spin_unlock_irq(&n->list_lock); 4239 4240 list_for_each_entry_safe(slab, h, &discard, slab_list) 4241 discard_slab(s, slab); 4242 } 4243 4244 bool __kmem_cache_empty(struct kmem_cache *s) 4245 { 4246 int node; 4247 struct kmem_cache_node *n; 4248 4249 for_each_kmem_cache_node(s, node, n) 4250 if (n->nr_partial || slabs_node(s, node)) 4251 return false; 4252 return true; 4253 } 4254 4255 /* 4256 * Release all resources used by a slab cache. 
4257 */ 4258 int __kmem_cache_shutdown(struct kmem_cache *s) 4259 { 4260 int node; 4261 struct kmem_cache_node *n; 4262 4263 flush_all_cpus_locked(s); 4264 /* Attempt to free all objects */ 4265 for_each_kmem_cache_node(s, node, n) { 4266 free_partial(s, n); 4267 if (n->nr_partial || slabs_node(s, node)) 4268 return 1; 4269 } 4270 return 0; 4271 } 4272 4273 #ifdef CONFIG_PRINTK 4274 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 4275 { 4276 void *base; 4277 int __maybe_unused i; 4278 unsigned int objnr; 4279 void *objp; 4280 void *objp0; 4281 struct kmem_cache *s = slab->slab_cache; 4282 struct track __maybe_unused *trackp; 4283 4284 kpp->kp_ptr = object; 4285 kpp->kp_slab = slab; 4286 kpp->kp_slab_cache = s; 4287 base = slab_address(slab); 4288 objp0 = kasan_reset_tag(object); 4289 #ifdef CONFIG_SLUB_DEBUG 4290 objp = restore_red_left(s, objp0); 4291 #else 4292 objp = objp0; 4293 #endif 4294 objnr = obj_to_index(s, slab, objp); 4295 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 4296 objp = base + s->size * objnr; 4297 kpp->kp_objp = objp; 4298 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 4299 || (objp - base) % s->size) || 4300 !(s->flags & SLAB_STORE_USER)) 4301 return; 4302 #ifdef CONFIG_SLUB_DEBUG 4303 objp = fixup_red_left(s, objp); 4304 trackp = get_track(s, objp, TRACK_ALLOC); 4305 kpp->kp_ret = (void *)trackp->addr; 4306 #ifdef CONFIG_STACKDEPOT 4307 { 4308 depot_stack_handle_t handle; 4309 unsigned long *entries; 4310 unsigned int nr_entries; 4311 4312 handle = READ_ONCE(trackp->handle); 4313 if (handle) { 4314 nr_entries = stack_depot_fetch(handle, &entries); 4315 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4316 kpp->kp_stack[i] = (void *)entries[i]; 4317 } 4318 4319 trackp = get_track(s, objp, TRACK_FREE); 4320 handle = READ_ONCE(trackp->handle); 4321 if (handle) { 4322 nr_entries = stack_depot_fetch(handle, &entries); 4323 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4324 kpp->kp_free_stack[i] = (void *)entries[i]; 4325 } 4326 } 4327 #endif 4328 #endif 4329 } 4330 #endif 4331 4332 /******************************************************************** 4333 * Kmalloc subsystem 4334 *******************************************************************/ 4335 4336 static int __init setup_slub_min_order(char *str) 4337 { 4338 get_option(&str, (int *)&slub_min_order); 4339 4340 return 1; 4341 } 4342 4343 __setup("slub_min_order=", setup_slub_min_order); 4344 4345 static int __init setup_slub_max_order(char *str) 4346 { 4347 get_option(&str, (int *)&slub_max_order); 4348 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4349 4350 return 1; 4351 } 4352 4353 __setup("slub_max_order=", setup_slub_max_order); 4354 4355 static int __init setup_slub_min_objects(char *str) 4356 { 4357 get_option(&str, (int *)&slub_min_objects); 4358 4359 return 1; 4360 } 4361 4362 __setup("slub_min_objects=", setup_slub_min_objects); 4363 4364 #ifdef CONFIG_HARDENED_USERCOPY 4365 /* 4366 * Rejects incorrectly sized objects and objects that are to be copied 4367 * to/from userspace but do not fall entirely within the containing slab 4368 * cache's usercopy region. 4369 * 4370 * Returns NULL if check passes, otherwise const char * to name of cache 4371 * to indicate an error. 
4372 */ 4373 void __check_heap_object(const void *ptr, unsigned long n, 4374 const struct slab *slab, bool to_user) 4375 { 4376 struct kmem_cache *s; 4377 unsigned int offset; 4378 bool is_kfence = is_kfence_address(ptr); 4379 4380 ptr = kasan_reset_tag(ptr); 4381 4382 /* Find object and usable object size. */ 4383 s = slab->slab_cache; 4384 4385 /* Reject impossible pointers. */ 4386 if (ptr < slab_address(slab)) 4387 usercopy_abort("SLUB object not in SLUB page?!", NULL, 4388 to_user, 0, n); 4389 4390 /* Find offset within object. */ 4391 if (is_kfence) 4392 offset = ptr - kfence_object_start(ptr); 4393 else 4394 offset = (ptr - slab_address(slab)) % s->size; 4395 4396 /* Adjust for redzone and reject if within the redzone. */ 4397 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 4398 if (offset < s->red_left_pad) 4399 usercopy_abort("SLUB object in left red zone", 4400 s->name, to_user, offset, n); 4401 offset -= s->red_left_pad; 4402 } 4403 4404 /* Allow address range falling entirely within usercopy region. */ 4405 if (offset >= s->useroffset && 4406 offset - s->useroffset <= s->usersize && 4407 n <= s->useroffset - offset + s->usersize) 4408 return; 4409 4410 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4411 } 4412 #endif /* CONFIG_HARDENED_USERCOPY */ 4413 4414 #define SHRINK_PROMOTE_MAX 32 4415 4416 /* 4417 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4418 * up most to the head of the partial lists. New allocations will then 4419 * fill those up and thus they can be removed from the partial lists. 4420 * 4421 * The slabs with the least items are placed last. This results in them 4422 * being allocated from last increasing the chance that the last objects 4423 * are freed in them. 4424 */ 4425 static int __kmem_cache_do_shrink(struct kmem_cache *s) 4426 { 4427 int node; 4428 int i; 4429 struct kmem_cache_node *n; 4430 struct slab *slab; 4431 struct slab *t; 4432 struct list_head discard; 4433 struct list_head promote[SHRINK_PROMOTE_MAX]; 4434 unsigned long flags; 4435 int ret = 0; 4436 4437 for_each_kmem_cache_node(s, node, n) { 4438 INIT_LIST_HEAD(&discard); 4439 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4440 INIT_LIST_HEAD(promote + i); 4441 4442 spin_lock_irqsave(&n->list_lock, flags); 4443 4444 /* 4445 * Build lists of slabs to discard or promote. 4446 * 4447 * Note that concurrent frees may occur while we hold the 4448 * list_lock. slab->inuse here is the upper limit. 4449 */ 4450 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 4451 int free = slab->objects - slab->inuse; 4452 4453 /* Do not reread slab->inuse */ 4454 barrier(); 4455 4456 /* We do not keep full slabs on the list */ 4457 BUG_ON(free <= 0); 4458 4459 if (free == slab->objects) { 4460 list_move(&slab->slab_list, &discard); 4461 n->nr_partial--; 4462 } else if (free <= SHRINK_PROMOTE_MAX) 4463 list_move(&slab->slab_list, promote + free - 1); 4464 } 4465 4466 /* 4467 * Promote the slabs filled up most to the head of the 4468 * partial list. 
4469 */ 4470 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 4471 list_splice(promote + i, &n->partial); 4472 4473 spin_unlock_irqrestore(&n->list_lock, flags); 4474 4475 /* Release empty slabs */ 4476 list_for_each_entry_safe(slab, t, &discard, slab_list) 4477 discard_slab(s, slab); 4478 4479 if (slabs_node(s, node)) 4480 ret = 1; 4481 } 4482 4483 return ret; 4484 } 4485 4486 int __kmem_cache_shrink(struct kmem_cache *s) 4487 { 4488 flush_all(s); 4489 return __kmem_cache_do_shrink(s); 4490 } 4491 4492 static int slab_mem_going_offline_callback(void *arg) 4493 { 4494 struct kmem_cache *s; 4495 4496 mutex_lock(&slab_mutex); 4497 list_for_each_entry(s, &slab_caches, list) { 4498 flush_all_cpus_locked(s); 4499 __kmem_cache_do_shrink(s); 4500 } 4501 mutex_unlock(&slab_mutex); 4502 4503 return 0; 4504 } 4505 4506 static void slab_mem_offline_callback(void *arg) 4507 { 4508 struct memory_notify *marg = arg; 4509 int offline_node; 4510 4511 offline_node = marg->status_change_nid_normal; 4512 4513 /* 4514 * If the node still has available memory. we need kmem_cache_node 4515 * for it yet. 4516 */ 4517 if (offline_node < 0) 4518 return; 4519 4520 mutex_lock(&slab_mutex); 4521 node_clear(offline_node, slab_nodes); 4522 /* 4523 * We no longer free kmem_cache_node structures here, as it would be 4524 * racy with all get_node() users, and infeasible to protect them with 4525 * slab_mutex. 4526 */ 4527 mutex_unlock(&slab_mutex); 4528 } 4529 4530 static int slab_mem_going_online_callback(void *arg) 4531 { 4532 struct kmem_cache_node *n; 4533 struct kmem_cache *s; 4534 struct memory_notify *marg = arg; 4535 int nid = marg->status_change_nid_normal; 4536 int ret = 0; 4537 4538 /* 4539 * If the node's memory is already available, then kmem_cache_node is 4540 * already created. Nothing to do. 4541 */ 4542 if (nid < 0) 4543 return 0; 4544 4545 /* 4546 * We are bringing a node online. No memory is available yet. We must 4547 * allocate a kmem_cache_node structure in order to bring the node 4548 * online. 4549 */ 4550 mutex_lock(&slab_mutex); 4551 list_for_each_entry(s, &slab_caches, list) { 4552 /* 4553 * The structure may already exist if the node was previously 4554 * onlined and offlined. 4555 */ 4556 if (get_node(s, nid)) 4557 continue; 4558 /* 4559 * XXX: kmem_cache_alloc_node will fallback to other nodes 4560 * since memory is not yet available from the node that 4561 * is brought up. 4562 */ 4563 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 4564 if (!n) { 4565 ret = -ENOMEM; 4566 goto out; 4567 } 4568 init_kmem_cache_node(n); 4569 s->node[nid] = n; 4570 } 4571 /* 4572 * Any cache created after this point will also have kmem_cache_node 4573 * initialized for the new node. 
4574 */ 4575 node_set(nid, slab_nodes); 4576 out: 4577 mutex_unlock(&slab_mutex); 4578 return ret; 4579 } 4580 4581 static int slab_memory_callback(struct notifier_block *self, 4582 unsigned long action, void *arg) 4583 { 4584 int ret = 0; 4585 4586 switch (action) { 4587 case MEM_GOING_ONLINE: 4588 ret = slab_mem_going_online_callback(arg); 4589 break; 4590 case MEM_GOING_OFFLINE: 4591 ret = slab_mem_going_offline_callback(arg); 4592 break; 4593 case MEM_OFFLINE: 4594 case MEM_CANCEL_ONLINE: 4595 slab_mem_offline_callback(arg); 4596 break; 4597 case MEM_ONLINE: 4598 case MEM_CANCEL_OFFLINE: 4599 break; 4600 } 4601 if (ret) 4602 ret = notifier_from_errno(ret); 4603 else 4604 ret = NOTIFY_OK; 4605 return ret; 4606 } 4607 4608 static struct notifier_block slab_memory_callback_nb = { 4609 .notifier_call = slab_memory_callback, 4610 .priority = SLAB_CALLBACK_PRI, 4611 }; 4612 4613 /******************************************************************** 4614 * Basic setup of slabs 4615 *******************************************************************/ 4616 4617 /* 4618 * Used for early kmem_cache structures that were allocated using 4619 * the page allocator. Allocate them properly then fix up the pointers 4620 * that may be pointing to the wrong kmem_cache structure. 4621 */ 4622 4623 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4624 { 4625 int node; 4626 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4627 struct kmem_cache_node *n; 4628 4629 memcpy(s, static_cache, kmem_cache->object_size); 4630 4631 /* 4632 * This runs very early, and only the boot processor is supposed to be 4633 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4634 * IPIs around. 4635 */ 4636 __flush_cpu_slab(s, smp_processor_id()); 4637 for_each_kmem_cache_node(s, node, n) { 4638 struct slab *p; 4639 4640 list_for_each_entry(p, &n->partial, slab_list) 4641 p->slab_cache = s; 4642 4643 #ifdef CONFIG_SLUB_DEBUG 4644 list_for_each_entry(p, &n->full, slab_list) 4645 p->slab_cache = s; 4646 #endif 4647 } 4648 list_add(&s->list, &slab_caches); 4649 return s; 4650 } 4651 4652 void __init kmem_cache_init(void) 4653 { 4654 static __initdata struct kmem_cache boot_kmem_cache, 4655 boot_kmem_cache_node; 4656 int node; 4657 4658 if (debug_guardpage_minorder()) 4659 slub_max_order = 0; 4660 4661 /* Print slub debugging pointers without hashing */ 4662 if (__slub_debug_enabled()) 4663 no_hash_pointers_enable(NULL); 4664 4665 kmem_cache_node = &boot_kmem_cache_node; 4666 kmem_cache = &boot_kmem_cache; 4667 4668 /* 4669 * Initialize the nodemask for which we will allocate per node 4670 * structures. Here we don't need taking slab_mutex yet. 
4671 */ 4672 for_each_node_state(node, N_NORMAL_MEMORY) 4673 node_set(node, slab_nodes); 4674 4675 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4676 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4677 4678 register_hotmemory_notifier(&slab_memory_callback_nb); 4679 4680 /* Able to allocate the per node structures */ 4681 slab_state = PARTIAL; 4682 4683 create_boot_cache(kmem_cache, "kmem_cache", 4684 offsetof(struct kmem_cache, node) + 4685 nr_node_ids * sizeof(struct kmem_cache_node *), 4686 SLAB_HWCACHE_ALIGN, 0, 0); 4687 4688 kmem_cache = bootstrap(&boot_kmem_cache); 4689 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4690 4691 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4692 setup_kmalloc_cache_index_table(); 4693 create_kmalloc_caches(0); 4694 4695 /* Setup random freelists for each cache */ 4696 init_freelist_randomization(); 4697 4698 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4699 slub_cpu_dead); 4700 4701 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4702 cache_line_size(), 4703 slub_min_order, slub_max_order, slub_min_objects, 4704 nr_cpu_ids, nr_node_ids); 4705 } 4706 4707 void __init kmem_cache_init_late(void) 4708 { 4709 } 4710 4711 struct kmem_cache * 4712 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4713 slab_flags_t flags, void (*ctor)(void *)) 4714 { 4715 struct kmem_cache *s; 4716 4717 s = find_mergeable(size, align, flags, name, ctor); 4718 if (s) { 4719 if (sysfs_slab_alias(s, name)) 4720 return NULL; 4721 4722 s->refcount++; 4723 4724 /* 4725 * Adjust the object sizes so that we clear 4726 * the complete object on kzalloc. 4727 */ 4728 s->object_size = max(s->object_size, size); 4729 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4730 } 4731 4732 return s; 4733 } 4734 4735 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4736 { 4737 int err; 4738 4739 err = kmem_cache_open(s, flags); 4740 if (err) 4741 return err; 4742 4743 /* Mutex is not taken during early boot */ 4744 if (slab_state <= UP) 4745 return 0; 4746 4747 err = sysfs_slab_add(s); 4748 if (err) { 4749 __kmem_cache_release(s); 4750 return err; 4751 } 4752 4753 if (s->flags & SLAB_STORE_USER) 4754 debugfs_slab_add(s); 4755 4756 return 0; 4757 } 4758 4759 #ifdef CONFIG_SYSFS 4760 static int count_inuse(struct slab *slab) 4761 { 4762 return slab->inuse; 4763 } 4764 4765 static int count_total(struct slab *slab) 4766 { 4767 return slab->objects; 4768 } 4769 #endif 4770 4771 #ifdef CONFIG_SLUB_DEBUG 4772 static void validate_slab(struct kmem_cache *s, struct slab *slab, 4773 unsigned long *obj_map) 4774 { 4775 void *p; 4776 void *addr = slab_address(slab); 4777 unsigned long flags; 4778 4779 slab_lock(slab, &flags); 4780 4781 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 4782 goto unlock; 4783 4784 /* Now we know that a valid freelist exists */ 4785 __fill_map(obj_map, s, slab); 4786 for_each_object(p, s, addr, slab->objects) { 4787 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4789 4790 if (!check_object(s, slab, p, val)) 4791 break; 4792 } 4793 unlock: 4794 slab_unlock(slab, &flags); 4795 } 4796 4797 static int validate_slab_node(struct kmem_cache *s, 4798 struct kmem_cache_node *n, unsigned long *obj_map) 4799 { 4800 unsigned long count = 0; 4801 struct slab *slab; 4802 unsigned long flags; 4803 4804 spin_lock_irqsave(&n->list_lock, flags); 4805 4806 list_for_each_entry(slab, &n->partial, slab_list) { 4807 validate_slab(s, slab, obj_map); 4808 count++; 4809 } 4810 if (count != n->nr_partial) { 4811 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4812 s->name, count, n->nr_partial); 4813 slab_add_kunit_errors(); 4814 } 4815 4816 if (!(s->flags & SLAB_STORE_USER)) 4817 goto out; 4818 4819 list_for_each_entry(slab, &n->full, slab_list) { 4820 validate_slab(s, slab, obj_map); 4821 count++; 4822 } 4823 if (count != atomic_long_read(&n->nr_slabs)) { 4824 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4825 s->name, count, atomic_long_read(&n->nr_slabs)); 4826 slab_add_kunit_errors(); 4827 } 4828 4829 out: 4830 spin_unlock_irqrestore(&n->list_lock, flags); 4831 return count; 4832 } 4833 4834 long validate_slab_cache(struct kmem_cache *s) 4835 { 4836 int node; 4837 unsigned long count = 0; 4838 struct kmem_cache_node *n; 4839 unsigned long *obj_map; 4840 4841 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 4842 if (!obj_map) 4843 return -ENOMEM; 4844 4845 flush_all(s); 4846 for_each_kmem_cache_node(s, node, n) 4847 count += validate_slab_node(s, n, obj_map); 4848 4849 bitmap_free(obj_map); 4850 4851 return count; 4852 } 4853 EXPORT_SYMBOL(validate_slab_cache); 4854 4855 #ifdef CONFIG_DEBUG_FS 4856 /* 4857 * Generate lists of code addresses where slabcache objects are allocated 4858 * and freed. 4859 */ 4860 4861 struct location { 4862 depot_stack_handle_t handle; 4863 unsigned long count; 4864 unsigned long addr; 4865 long long sum_time; 4866 long min_time; 4867 long max_time; 4868 long min_pid; 4869 long max_pid; 4870 DECLARE_BITMAP(cpus, NR_CPUS); 4871 nodemask_t nodes; 4872 }; 4873 4874 struct loc_track { 4875 unsigned long max; 4876 unsigned long count; 4877 struct location *loc; 4878 loff_t idx; 4879 }; 4880 4881 static struct dentry *slab_debugfs_root; 4882 4883 static void free_loc_track(struct loc_track *t) 4884 { 4885 if (t->max) 4886 free_pages((unsigned long)t->loc, 4887 get_order(sizeof(struct location) * t->max)); 4888 } 4889 4890 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4891 { 4892 struct location *l; 4893 int order; 4894 4895 order = get_order(sizeof(struct location) * max); 4896 4897 l = (void *)__get_free_pages(flags, order); 4898 if (!l) 4899 return 0; 4900 4901 if (t->count) { 4902 memcpy(l, t->loc, sizeof(struct location) * t->count); 4903 free_loc_track(t); 4904 } 4905 t->max = max; 4906 t->loc = l; 4907 return 1; 4908 } 4909 4910 static int add_location(struct loc_track *t, struct kmem_cache *s, 4911 const struct track *track) 4912 { 4913 long start, end, pos; 4914 struct location *l; 4915 unsigned long caddr, chandle; 4916 unsigned long age = jiffies - track->when; 4917 depot_stack_handle_t handle = 0; 4918 4919 #ifdef CONFIG_STACKDEPOT 4920 handle = READ_ONCE(track->handle); 4921 #endif 4922 start = -1; 4923 end = t->count; 4924 4925 for ( ; ; ) { 4926 pos = start + (end - start + 1) / 2; 4927 4928 /* 4929 * There is nothing at "end". If we end up there, 4930 * we need to insert before "end".
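 *
 * This is a binary search over entries sorted by (addr, handle):
 * "start" stays strictly below any possible match and "end" one past
 * it, so when the window closes, "pos" is the insertion point.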
4931 */ 4932 if (pos == end) 4933 break; 4934 4935 caddr = t->loc[pos].addr; 4936 chandle = t->loc[pos].handle; 4937 if ((track->addr == caddr) && (handle == chandle)) { 4938 4939 l = &t->loc[pos]; 4940 l->count++; 4941 if (track->when) { 4942 l->sum_time += age; 4943 if (age < l->min_time) 4944 l->min_time = age; 4945 if (age > l->max_time) 4946 l->max_time = age; 4947 4948 if (track->pid < l->min_pid) 4949 l->min_pid = track->pid; 4950 if (track->pid > l->max_pid) 4951 l->max_pid = track->pid; 4952 4953 cpumask_set_cpu(track->cpu, 4954 to_cpumask(l->cpus)); 4955 } 4956 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4957 return 1; 4958 } 4959 4960 if (track->addr < caddr) 4961 end = pos; 4962 else if (track->addr == caddr && handle < chandle) 4963 end = pos; 4964 else 4965 start = pos; 4966 } 4967 4968 /* 4969 * Not found. Insert new tracking element. 4970 */ 4971 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4972 return 0; 4973 4974 l = t->loc + pos; 4975 if (pos < t->count) 4976 memmove(l + 1, l, 4977 (t->count - pos) * sizeof(struct location)); 4978 t->count++; 4979 l->count = 1; 4980 l->addr = track->addr; 4981 l->sum_time = age; 4982 l->min_time = age; 4983 l->max_time = age; 4984 l->min_pid = track->pid; 4985 l->max_pid = track->pid; 4986 l->handle = handle; 4987 cpumask_clear(to_cpumask(l->cpus)); 4988 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4989 nodes_clear(l->nodes); 4990 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4991 return 1; 4992 } 4993 4994 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4995 struct slab *slab, enum track_item alloc, 4996 unsigned long *obj_map) 4997 { 4998 void *addr = slab_address(slab); 4999 void *p; 5000 5001 __fill_map(obj_map, s, slab); 5002 5003 for_each_object(p, s, addr, slab->objects) 5004 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 5005 add_location(t, s, get_track(s, p, alloc)); 5006 } 5007 #endif /* CONFIG_DEBUG_FS */ 5008 #endif /* CONFIG_SLUB_DEBUG */ 5009 5010 #ifdef CONFIG_SYSFS 5011 enum slab_stat_type { 5012 SL_ALL, /* All slabs */ 5013 SL_PARTIAL, /* Only partially allocated slabs */ 5014 SL_CPU, /* Only slabs used for cpu caches */ 5015 SL_OBJECTS, /* Determine allocated objects not slabs */ 5016 SL_TOTAL /* Determine object capacity not slabs */ 5017 }; 5018 5019 #define SO_ALL (1 << SL_ALL) 5020 #define SO_PARTIAL (1 << SL_PARTIAL) 5021 #define SO_CPU (1 << SL_CPU) 5022 #define SO_OBJECTS (1 << SL_OBJECTS) 5023 #define SO_TOTAL (1 << SL_TOTAL) 5024 5025 static ssize_t show_slab_objects(struct kmem_cache *s, 5026 char *buf, unsigned long flags) 5027 { 5028 unsigned long total = 0; 5029 int node; 5030 int x; 5031 unsigned long *nodes; 5032 int len = 0; 5033 5034 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 5035 if (!nodes) 5036 return -ENOMEM; 5037 5038 if (flags & SO_CPU) { 5039 int cpu; 5040 5041 for_each_possible_cpu(cpu) { 5042 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 5043 cpu); 5044 int node; 5045 struct slab *slab; 5046 5047 slab = READ_ONCE(c->slab); 5048 if (!slab) 5049 continue; 5050 5051 node = slab_nid(slab); 5052 if (flags & SO_TOTAL) 5053 x = slab->objects; 5054 else if (flags & SO_OBJECTS) 5055 x = slab->inuse; 5056 else 5057 x = 1; 5058 5059 total += x; 5060 nodes[node] += x; 5061 5062 #ifdef CONFIG_SLUB_CPU_PARTIAL 5063 slab = slub_percpu_partial_read_once(c); 5064 if (slab) { 5065 node = slab_nid(slab); 5066 if (flags & SO_TOTAL) 5067 WARN_ON_ONCE(1); 5068 else if (flags & SO_OBJECTS) 5069 WARN_ON_ONCE(1); 5070 
else 5071 x = slab->slabs; 5072 total += x; 5073 nodes[node] += x; 5074 } 5075 #endif 5076 } 5077 } 5078 5079 /* 5080 * Taking "mem_hotplug_lock" here with "kernfs_mutex" already held 5081 * would conflict with the existing lock order: 5082 * 5083 * mem_hotplug_lock->slab_mutex->kernfs_mutex 5084 * 5085 * We don't really need mem_hotplug_lock (to hold off 5086 * slab_mem_going_offline_callback) here because slab's memory hot 5087 * unplug code doesn't destroy the kmem_cache->node[] data. 5088 */ 5089 5090 #ifdef CONFIG_SLUB_DEBUG 5091 if (flags & SO_ALL) { 5092 struct kmem_cache_node *n; 5093 5094 for_each_kmem_cache_node(s, node, n) { 5095 5096 if (flags & SO_TOTAL) 5097 x = atomic_long_read(&n->total_objects); 5098 else if (flags & SO_OBJECTS) 5099 x = atomic_long_read(&n->total_objects) - 5100 count_partial(n, count_free); 5101 else 5102 x = atomic_long_read(&n->nr_slabs); 5103 total += x; 5104 nodes[node] += x; 5105 } 5106 5107 } else 5108 #endif 5109 if (flags & SO_PARTIAL) { 5110 struct kmem_cache_node *n; 5111 5112 for_each_kmem_cache_node(s, node, n) { 5113 if (flags & SO_TOTAL) 5114 x = count_partial(n, count_total); 5115 else if (flags & SO_OBJECTS) 5116 x = count_partial(n, count_inuse); 5117 else 5118 x = n->nr_partial; 5119 total += x; 5120 nodes[node] += x; 5121 } 5122 } 5123 5124 len += sysfs_emit_at(buf, len, "%lu", total); 5125 #ifdef CONFIG_NUMA 5126 for (node = 0; node < nr_node_ids; node++) { 5127 if (nodes[node]) 5128 len += sysfs_emit_at(buf, len, " N%d=%lu", 5129 node, nodes[node]); 5130 } 5131 #endif 5132 len += sysfs_emit_at(buf, len, "\n"); 5133 kfree(nodes); 5134 5135 return len; 5136 } 5137 5138 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5139 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5140 5141 struct slab_attribute { 5142 struct attribute attr; 5143 ssize_t (*show)(struct kmem_cache *s, char *buf); 5144 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5145 }; 5146 5147 #define SLAB_ATTR_RO(_name) \ 5148 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 5149 5150 #define SLAB_ATTR(_name) \ 5151 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 5152 5153 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5154 { 5155 return sysfs_emit(buf, "%u\n", s->size); 5156 } 5157 SLAB_ATTR_RO(slab_size); 5158 5159 static ssize_t align_show(struct kmem_cache *s, char *buf) 5160 { 5161 return sysfs_emit(buf, "%u\n", s->align); 5162 } 5163 SLAB_ATTR_RO(align); 5164 5165 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5166 { 5167 return sysfs_emit(buf, "%u\n", s->object_size); 5168 } 5169 SLAB_ATTR_RO(object_size); 5170 5171 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5172 { 5173 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5174 } 5175 SLAB_ATTR_RO(objs_per_slab); 5176 5177 static ssize_t order_show(struct kmem_cache *s, char *buf) 5178 { 5179 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5180 } 5181 SLAB_ATTR_RO(order); 5182 5183 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5184 { 5185 return sysfs_emit(buf, "%lu\n", s->min_partial); 5186 } 5187 5188 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5189 size_t length) 5190 { 5191 unsigned long min; 5192 int err; 5193 5194 err = kstrtoul(buf, 10, &min); 5195 if (err) 5196 return err; 5197 5198 s->min_partial = min; 5199 return length; 5200 } 5201 SLAB_ATTR(min_partial); 5202 5203 static ssize_t
cpu_partial_show(struct kmem_cache *s, char *buf) 5204 { 5205 unsigned int nr_partial = 0; 5206 #ifdef CONFIG_SLUB_CPU_PARTIAL 5207 nr_partial = s->cpu_partial; 5208 #endif 5209 5210 return sysfs_emit(buf, "%u\n", nr_partial); 5211 } 5212 5213 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5214 size_t length) 5215 { 5216 unsigned int objects; 5217 int err; 5218 5219 err = kstrtouint(buf, 10, &objects); 5220 if (err) 5221 return err; 5222 if (objects && !kmem_cache_has_cpu_partial(s)) 5223 return -EINVAL; 5224 5225 slub_set_cpu_partial(s, objects); 5226 flush_all(s); 5227 return length; 5228 } 5229 SLAB_ATTR(cpu_partial); 5230 5231 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5232 { 5233 if (!s->ctor) 5234 return 0; 5235 return sysfs_emit(buf, "%pS\n", s->ctor); 5236 } 5237 SLAB_ATTR_RO(ctor); 5238 5239 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5240 { 5241 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5242 } 5243 SLAB_ATTR_RO(aliases); 5244 5245 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5246 { 5247 return show_slab_objects(s, buf, SO_PARTIAL); 5248 } 5249 SLAB_ATTR_RO(partial); 5250 5251 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5252 { 5253 return show_slab_objects(s, buf, SO_CPU); 5254 } 5255 SLAB_ATTR_RO(cpu_slabs); 5256 5257 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5258 { 5259 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5260 } 5261 SLAB_ATTR_RO(objects); 5262 5263 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5264 { 5265 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5266 } 5267 SLAB_ATTR_RO(objects_partial); 5268 5269 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5270 { 5271 int objects = 0; 5272 int slabs = 0; 5273 int cpu __maybe_unused; 5274 int len = 0; 5275 5276 #ifdef CONFIG_SLUB_CPU_PARTIAL 5277 for_each_online_cpu(cpu) { 5278 struct slab *slab; 5279 5280 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5281 5282 if (slab) 5283 slabs += slab->slabs; 5284 } 5285 #endif 5286 5287 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 5288 objects = (slabs * oo_objects(s->oo)) / 2; 5289 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 5290 5291 #if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP) 5292 for_each_online_cpu(cpu) { 5293 struct slab *slab; 5294 5295 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5296 if (slab) { 5297 slabs = READ_ONCE(slab->slabs); 5298 objects = (slabs * oo_objects(s->oo)) / 2; 5299 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5300 cpu, objects, slabs); 5301 } 5302 } 5303 #endif 5304 len += sysfs_emit_at(buf, len, "\n"); 5305 5306 return len; 5307 } 5308 SLAB_ATTR_RO(slabs_cpu_partial); 5309 5310 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5311 { 5312 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5313 } 5314 SLAB_ATTR_RO(reclaim_account); 5315 5316 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5317 { 5318 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5319 } 5320 SLAB_ATTR_RO(hwcache_align); 5321 5322 #ifdef CONFIG_ZONE_DMA 5323 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5324 { 5325 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5326 } 5327 SLAB_ATTR_RO(cache_dma); 5328 #endif 5329 5330 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5331 { 5332 return 
sysfs_emit(buf, "%u\n", s->usersize); 5333 } 5334 SLAB_ATTR_RO(usersize); 5335 5336 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5337 { 5338 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5339 } 5340 SLAB_ATTR_RO(destroy_by_rcu); 5341 5342 #ifdef CONFIG_SLUB_DEBUG 5343 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5344 { 5345 return show_slab_objects(s, buf, SO_ALL); 5346 } 5347 SLAB_ATTR_RO(slabs); 5348 5349 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5350 { 5351 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5352 } 5353 SLAB_ATTR_RO(total_objects); 5354 5355 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5356 { 5357 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5358 } 5359 SLAB_ATTR_RO(sanity_checks); 5360 5361 static ssize_t trace_show(struct kmem_cache *s, char *buf) 5362 { 5363 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5364 } 5365 SLAB_ATTR_RO(trace); 5366 5367 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5368 { 5369 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5370 } 5371 5372 SLAB_ATTR_RO(red_zone); 5373 5374 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5375 { 5376 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5377 } 5378 5379 SLAB_ATTR_RO(poison); 5380 5381 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5382 { 5383 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5384 } 5385 5386 SLAB_ATTR_RO(store_user); 5387 5388 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5389 { 5390 return 0; 5391 } 5392 5393 static ssize_t validate_store(struct kmem_cache *s, 5394 const char *buf, size_t length) 5395 { 5396 int ret = -EINVAL; 5397 5398 if (buf[0] == '1') { 5399 ret = validate_slab_cache(s); 5400 if (ret >= 0) 5401 ret = length; 5402 } 5403 return ret; 5404 } 5405 SLAB_ATTR(validate); 5406 5407 #endif /* CONFIG_SLUB_DEBUG */ 5408 5409 #ifdef CONFIG_FAILSLAB 5410 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5411 { 5412 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5413 } 5414 SLAB_ATTR_RO(failslab); 5415 #endif 5416 5417 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5418 { 5419 return 0; 5420 } 5421 5422 static ssize_t shrink_store(struct kmem_cache *s, 5423 const char *buf, size_t length) 5424 { 5425 if (buf[0] == '1') 5426 kmem_cache_shrink(s); 5427 else 5428 return -EINVAL; 5429 return length; 5430 } 5431 SLAB_ATTR(shrink); 5432 5433 #ifdef CONFIG_NUMA 5434 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5435 { 5436 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5437 } 5438 5439 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5440 const char *buf, size_t length) 5441 { 5442 unsigned int ratio; 5443 int err; 5444 5445 err = kstrtouint(buf, 10, &ratio); 5446 if (err) 5447 return err; 5448 if (ratio > 100) 5449 return -ERANGE; 5450 5451 s->remote_node_defrag_ratio = ratio * 10; 5452 5453 return length; 5454 } 5455 SLAB_ATTR(remote_node_defrag_ratio); 5456 #endif 5457 5458 #ifdef CONFIG_SLUB_STATS 5459 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5460 { 5461 unsigned long sum = 0; 5462 int cpu; 5463 int len = 0; 5464 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5465 5466 if (!data) 5467 return -ENOMEM; 5468 5469 for_each_online_cpu(cpu) { 5470 unsigned x = per_cpu_ptr(s->cpu_slab, 
cpu)->stat[si]; 5471 5472 data[cpu] = x; 5473 sum += x; 5474 } 5475 5476 len += sysfs_emit_at(buf, len, "%lu", sum); 5477 5478 #ifdef CONFIG_SMP 5479 for_each_online_cpu(cpu) { 5480 if (data[cpu]) 5481 len += sysfs_emit_at(buf, len, " C%d=%u", 5482 cpu, data[cpu]); 5483 } 5484 #endif 5485 kfree(data); 5486 len += sysfs_emit_at(buf, len, "\n"); 5487 5488 return len; 5489 } 5490 5491 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5492 { 5493 int cpu; 5494 5495 for_each_online_cpu(cpu) 5496 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5497 } 5498 5499 #define STAT_ATTR(si, text) \ 5500 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5501 { \ 5502 return show_stat(s, buf, si); \ 5503 } \ 5504 static ssize_t text##_store(struct kmem_cache *s, \ 5505 const char *buf, size_t length) \ 5506 { \ 5507 if (buf[0] != '0') \ 5508 return -EINVAL; \ 5509 clear_stat(s, si); \ 5510 return length; \ 5511 } \ 5512 SLAB_ATTR(text); \ 5513 5514 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5515 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5516 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5517 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5518 STAT_ATTR(FREE_FROZEN, free_frozen); 5519 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5520 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5521 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5522 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5523 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5524 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5525 STAT_ATTR(FREE_SLAB, free_slab); 5526 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5527 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5528 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5529 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5530 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5531 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5532 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5533 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5534 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5535 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5536 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5537 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5538 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5539 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5540 #endif /* CONFIG_SLUB_STATS */ 5541 5542 static struct attribute *slab_attrs[] = { 5543 &slab_size_attr.attr, 5544 &object_size_attr.attr, 5545 &objs_per_slab_attr.attr, 5546 &order_attr.attr, 5547 &min_partial_attr.attr, 5548 &cpu_partial_attr.attr, 5549 &objects_attr.attr, 5550 &objects_partial_attr.attr, 5551 &partial_attr.attr, 5552 &cpu_slabs_attr.attr, 5553 &ctor_attr.attr, 5554 &aliases_attr.attr, 5555 &align_attr.attr, 5556 &hwcache_align_attr.attr, 5557 &reclaim_account_attr.attr, 5558 &destroy_by_rcu_attr.attr, 5559 &shrink_attr.attr, 5560 &slabs_cpu_partial_attr.attr, 5561 #ifdef CONFIG_SLUB_DEBUG 5562 &total_objects_attr.attr, 5563 &slabs_attr.attr, 5564 &sanity_checks_attr.attr, 5565 &trace_attr.attr, 5566 &red_zone_attr.attr, 5567 &poison_attr.attr, 5568 &store_user_attr.attr, 5569 &validate_attr.attr, 5570 #endif 5571 #ifdef CONFIG_ZONE_DMA 5572 &cache_dma_attr.attr, 5573 #endif 5574 #ifdef CONFIG_NUMA 5575 &remote_node_defrag_ratio_attr.attr, 5576 #endif 5577 #ifdef CONFIG_SLUB_STATS 5578 &alloc_fastpath_attr.attr, 5579 &alloc_slowpath_attr.attr, 5580 &free_fastpath_attr.attr, 5581 &free_slowpath_attr.attr, 5582 &free_frozen_attr.attr, 5583 &free_add_partial_attr.attr, 5584 &free_remove_partial_attr.attr, 5585 
&alloc_from_partial_attr.attr, 5586 &alloc_slab_attr.attr, 5587 &alloc_refill_attr.attr, 5588 &alloc_node_mismatch_attr.attr, 5589 &free_slab_attr.attr, 5590 &cpuslab_flush_attr.attr, 5591 &deactivate_full_attr.attr, 5592 &deactivate_empty_attr.attr, 5593 &deactivate_to_head_attr.attr, 5594 &deactivate_to_tail_attr.attr, 5595 &deactivate_remote_frees_attr.attr, 5596 &deactivate_bypass_attr.attr, 5597 &order_fallback_attr.attr, 5598 &cmpxchg_double_fail_attr.attr, 5599 &cmpxchg_double_cpu_fail_attr.attr, 5600 &cpu_partial_alloc_attr.attr, 5601 &cpu_partial_free_attr.attr, 5602 &cpu_partial_node_attr.attr, 5603 &cpu_partial_drain_attr.attr, 5604 #endif 5605 #ifdef CONFIG_FAILSLAB 5606 &failslab_attr.attr, 5607 #endif 5608 &usersize_attr.attr, 5609 5610 NULL 5611 }; 5612 5613 static const struct attribute_group slab_attr_group = { 5614 .attrs = slab_attrs, 5615 }; 5616 5617 static ssize_t slab_attr_show(struct kobject *kobj, 5618 struct attribute *attr, 5619 char *buf) 5620 { 5621 struct slab_attribute *attribute; 5622 struct kmem_cache *s; 5623 int err; 5624 5625 attribute = to_slab_attr(attr); 5626 s = to_slab(kobj); 5627 5628 if (!attribute->show) 5629 return -EIO; 5630 5631 err = attribute->show(s, buf); 5632 5633 return err; 5634 } 5635 5636 static ssize_t slab_attr_store(struct kobject *kobj, 5637 struct attribute *attr, 5638 const char *buf, size_t len) 5639 { 5640 struct slab_attribute *attribute; 5641 struct kmem_cache *s; 5642 int err; 5643 5644 attribute = to_slab_attr(attr); 5645 s = to_slab(kobj); 5646 5647 if (!attribute->store) 5648 return -EIO; 5649 5650 err = attribute->store(s, buf, len); 5651 return err; 5652 } 5653 5654 static void kmem_cache_release(struct kobject *k) 5655 { 5656 slab_kmem_cache_release(to_slab(k)); 5657 } 5658 5659 static const struct sysfs_ops slab_sysfs_ops = { 5660 .show = slab_attr_show, 5661 .store = slab_attr_store, 5662 }; 5663 5664 static struct kobj_type slab_ktype = { 5665 .sysfs_ops = &slab_sysfs_ops, 5666 .release = kmem_cache_release, 5667 }; 5668 5669 static struct kset *slab_kset; 5670 5671 static inline struct kset *cache_kset(struct kmem_cache *s) 5672 { 5673 return slab_kset; 5674 } 5675 5676 #define ID_STR_LENGTH 64 5677 5678 /* Create a unique string id for a slab cache: 5679 * 5680 * Format :[flags-]size 5681 */ 5682 static char *create_unique_id(struct kmem_cache *s) 5683 { 5684 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5685 char *p = name; 5686 5687 BUG_ON(!name); 5688 5689 *p++ = ':'; 5690 /* 5691 * First flags affecting slabcache operations. We will only 5692 * get here for aliasable slabs so we do not need to support 5693 * too many flags. The flags here must cover all flags that 5694 * are matched during merging to guarantee that the id is 5695 * unique. 
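 *
 * For instance, a DMA cache of size 4096 with SLAB_ACCOUNT set would
 * get the id ":dA-0004096" (illustrative; the exact letters depend on
 * the flags tested below).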
5696 */ 5697 if (s->flags & SLAB_CACHE_DMA) 5698 *p++ = 'd'; 5699 if (s->flags & SLAB_CACHE_DMA32) 5700 *p++ = 'D'; 5701 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5702 *p++ = 'a'; 5703 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5704 *p++ = 'F'; 5705 if (s->flags & SLAB_ACCOUNT) 5706 *p++ = 'A'; 5707 if (p != name + 1) 5708 *p++ = '-'; 5709 p += sprintf(p, "%07u", s->size); 5710 5711 BUG_ON(p > name + ID_STR_LENGTH - 1); 5712 return name; 5713 } 5714 5715 static int sysfs_slab_add(struct kmem_cache *s) 5716 { 5717 int err; 5718 const char *name; 5719 struct kset *kset = cache_kset(s); 5720 int unmergeable = slab_unmergeable(s); 5721 5722 if (!kset) { 5723 kobject_init(&s->kobj, &slab_ktype); 5724 return 0; 5725 } 5726 5727 if (!unmergeable && disable_higher_order_debug && 5728 (slub_debug & DEBUG_METADATA_FLAGS)) 5729 unmergeable = 1; 5730 5731 if (unmergeable) { 5732 /* 5733 * Slabcache can never be merged so we can use the name proper. 5734 * This is typically the case for debug situations. In that 5735 * case we can catch duplicate names easily. 5736 */ 5737 sysfs_remove_link(&slab_kset->kobj, s->name); 5738 name = s->name; 5739 } else { 5740 /* 5741 * Create a unique name for the slab as a target 5742 * for the symlinks. 5743 */ 5744 name = create_unique_id(s); 5745 } 5746 5747 s->kobj.kset = kset; 5748 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5749 if (err) 5750 goto out; 5751 5752 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5753 if (err) 5754 goto out_del_kobj; 5755 5756 if (!unmergeable) { 5757 /* Setup first alias */ 5758 sysfs_slab_alias(s, s->name); 5759 } 5760 out: 5761 if (!unmergeable) 5762 kfree(name); 5763 return err; 5764 out_del_kobj: 5765 kobject_del(&s->kobj); 5766 goto out; 5767 } 5768 5769 void sysfs_slab_unlink(struct kmem_cache *s) 5770 { 5771 if (slab_state >= FULL) 5772 kobject_del(&s->kobj); 5773 } 5774 5775 void sysfs_slab_release(struct kmem_cache *s) 5776 { 5777 if (slab_state >= FULL) 5778 kobject_put(&s->kobj); 5779 } 5780 5781 /* 5782 * Need to buffer aliases during bootup until sysfs becomes 5783 * available lest we lose that information. 5784 */ 5785 struct saved_alias { 5786 struct kmem_cache *s; 5787 const char *name; 5788 struct saved_alias *next; 5789 }; 5790 5791 static struct saved_alias *alias_list; 5792 5793 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5794 { 5795 struct saved_alias *al; 5796 5797 if (slab_state == FULL) { 5798 /* 5799 * If we have a leftover link then remove it. 
5800 */ 5801 sysfs_remove_link(&slab_kset->kobj, name); 5802 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5803 } 5804 5805 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5806 if (!al) 5807 return -ENOMEM; 5808 5809 al->s = s; 5810 al->name = name; 5811 al->next = alias_list; 5812 alias_list = al; 5813 return 0; 5814 } 5815 5816 static int __init slab_sysfs_init(void) 5817 { 5818 struct kmem_cache *s; 5819 int err; 5820 5821 mutex_lock(&slab_mutex); 5822 5823 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5824 if (!slab_kset) { 5825 mutex_unlock(&slab_mutex); 5826 pr_err("Cannot register slab subsystem.\n"); 5827 return -ENOSYS; 5828 } 5829 5830 slab_state = FULL; 5831 5832 list_for_each_entry(s, &slab_caches, list) { 5833 err = sysfs_slab_add(s); 5834 if (err) 5835 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5836 s->name); 5837 } 5838 5839 while (alias_list) { 5840 struct saved_alias *al = alias_list; 5841 5842 alias_list = alias_list->next; 5843 err = sysfs_slab_alias(al->s, al->name); 5844 if (err) 5845 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5846 al->name); 5847 kfree(al); 5848 } 5849 5850 mutex_unlock(&slab_mutex); 5851 return 0; 5852 } 5853 5854 __initcall(slab_sysfs_init); 5855 #endif /* CONFIG_SYSFS */ 5856 5857 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 5858 static int slab_debugfs_show(struct seq_file *seq, void *v) 5859 { 5860 struct loc_track *t = seq->private; 5861 struct location *l; 5862 unsigned long idx; 5863 5864 idx = (unsigned long) t->idx; 5865 if (idx < t->count) { 5866 l = &t->loc[idx]; 5867 5868 seq_printf(seq, "%7ld ", l->count); 5869 5870 if (l->addr) 5871 seq_printf(seq, "%pS", (void *)l->addr); 5872 else 5873 seq_puts(seq, "<not-available>"); 5874 5875 if (l->sum_time != l->min_time) { 5876 seq_printf(seq, " age=%ld/%llu/%ld", 5877 l->min_time, div_u64(l->sum_time, l->count), 5878 l->max_time); 5879 } else 5880 seq_printf(seq, " age=%ld", l->min_time); 5881 5882 if (l->min_pid != l->max_pid) 5883 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 5884 else 5885 seq_printf(seq, " pid=%ld", 5886 l->min_pid); 5887 5888 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 5889 seq_printf(seq, " cpus=%*pbl", 5890 cpumask_pr_args(to_cpumask(l->cpus))); 5891 5892 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 5893 seq_printf(seq, " nodes=%*pbl", 5894 nodemask_pr_args(&l->nodes)); 5895 5896 #ifdef CONFIG_STACKDEPOT 5897 { 5898 depot_stack_handle_t handle; 5899 unsigned long *entries; 5900 unsigned int nr_entries, j; 5901 5902 handle = READ_ONCE(l->handle); 5903 if (handle) { 5904 nr_entries = stack_depot_fetch(handle, &entries); 5905 seq_puts(seq, "\n"); 5906 for (j = 0; j < nr_entries; j++) 5907 seq_printf(seq, " %pS\n", (void *)entries[j]); 5908 } 5909 } 5910 #endif 5911 seq_puts(seq, "\n"); 5912 } 5913 5914 if (!idx && !t->count) 5915 seq_puts(seq, "No data\n"); 5916 5917 return 0; 5918 } 5919 5920 static void slab_debugfs_stop(struct seq_file *seq, void *v) 5921 { 5922 } 5923 5924 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 5925 { 5926 struct loc_track *t = seq->private; 5927 5928 t->idx = ++(*ppos); 5929 if (*ppos <= t->count) 5930 return ppos; 5931 5932 return NULL; 5933 } 5934 5935 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 5936 { 5937 struct location *loc1 = (struct location *)a; 5938 struct location *loc2 = (struct location *)b; 5939 5940 if (loc1->count > loc2->count) 5941 return -1; 5942 else 
5943 return 1; 5944 } 5945 5946 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 5947 { 5948 struct loc_track *t = seq->private; 5949 5950 t->idx = *ppos; 5951 return ppos; 5952 } 5953 5954 static const struct seq_operations slab_debugfs_sops = { 5955 .start = slab_debugfs_start, 5956 .next = slab_debugfs_next, 5957 .stop = slab_debugfs_stop, 5958 .show = slab_debugfs_show, 5959 }; 5960 5961 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 5962 { 5963 5964 struct kmem_cache_node *n; 5965 enum track_item alloc; 5966 int node; 5967 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 5968 sizeof(struct loc_track)); 5969 struct kmem_cache *s = file_inode(filep)->i_private; 5970 unsigned long *obj_map; 5971 5972 if (!t) 5973 return -ENOMEM; 5974 5975 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5976 if (!obj_map) { 5977 seq_release_private(inode, filep); 5978 return -ENOMEM; 5979 } 5980 5981 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 5982 alloc = TRACK_ALLOC; 5983 else 5984 alloc = TRACK_FREE; 5985 5986 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 5987 bitmap_free(obj_map); 5988 seq_release_private(inode, filep); 5989 return -ENOMEM; 5990 } 5991 5992 for_each_kmem_cache_node(s, node, n) { 5993 unsigned long flags; 5994 struct slab *slab; 5995 5996 if (!atomic_long_read(&n->nr_slabs)) 5997 continue; 5998 5999 spin_lock_irqsave(&n->list_lock, flags); 6000 list_for_each_entry(slab, &n->partial, slab_list) 6001 process_slab(t, s, slab, alloc, obj_map); 6002 list_for_each_entry(slab, &n->full, slab_list) 6003 process_slab(t, s, slab, alloc, obj_map); 6004 spin_unlock_irqrestore(&n->list_lock, flags); 6005 } 6006 6007 /* Sort locations by count */ 6008 sort_r(t->loc, t->count, sizeof(struct location), 6009 cmp_loc_by_count, NULL, NULL); 6010 6011 bitmap_free(obj_map); 6012 return 0; 6013 } 6014 6015 static int slab_debug_trace_release(struct inode *inode, struct file *file) 6016 { 6017 struct seq_file *seq = file->private_data; 6018 struct loc_track *t = seq->private; 6019 6020 free_loc_track(t); 6021 return seq_release_private(inode, file); 6022 } 6023 6024 static const struct file_operations slab_debugfs_fops = { 6025 .open = slab_debug_trace_open, 6026 .read = seq_read, 6027 .llseek = seq_lseek, 6028 .release = slab_debug_trace_release, 6029 }; 6030 6031 static void debugfs_slab_add(struct kmem_cache *s) 6032 { 6033 struct dentry *slab_cache_dir; 6034 6035 if (unlikely(!slab_debugfs_root)) 6036 return; 6037 6038 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 6039 6040 debugfs_create_file("alloc_traces", 0400, 6041 slab_cache_dir, s, &slab_debugfs_fops); 6042 6043 debugfs_create_file("free_traces", 0400, 6044 slab_cache_dir, s, &slab_debugfs_fops); 6045 } 6046 6047 void debugfs_slab_release(struct kmem_cache *s) 6048 { 6049 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); 6050 } 6051 6052 static int __init slab_debugfs_init(void) 6053 { 6054 struct kmem_cache *s; 6055 6056 slab_debugfs_root = debugfs_create_dir("slab", NULL); 6057 6058 list_for_each_entry(s, &slab_caches, list) 6059 if (s->flags & SLAB_STORE_USER) 6060 debugfs_slab_add(s); 6061 6062 return 0; 6063 6064 } 6065 __initcall(slab_debugfs_init); 6066 #endif 6067 /* 6068 * The /proc/slabinfo ABI 6069 */ 6070 #ifdef CONFIG_SLUB_DEBUG 6071 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 6072 { 6073 unsigned long nr_slabs = 0; 6074 unsigned long nr_objs = 0; 6075 
unsigned long nr_free = 0; 6076 int node; 6077 struct kmem_cache_node *n; 6078 6079 for_each_kmem_cache_node(s, node, n) { 6080 nr_slabs += node_nr_slabs(n); 6081 nr_objs += node_nr_objs(n); 6082 nr_free += count_partial(n, count_free); 6083 } 6084 6085 sinfo->active_objs = nr_objs - nr_free; 6086 sinfo->num_objs = nr_objs; 6087 sinfo->active_slabs = nr_slabs; 6088 sinfo->num_slabs = nr_slabs; 6089 sinfo->objects_per_slab = oo_objects(s->oo); 6090 sinfo->cache_order = oo_order(s->oo); 6091 } 6092 6093 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 6094 { 6095 } 6096 6097 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 6098 size_t count, loff_t *ppos) 6099 { 6100 return -EIO; 6101 } 6102 #endif /* CONFIG_SLUB_DEBUG */ 6103