1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * SLUB: A slab allocator that limits cache line use instead of queuing 4 * objects in per cpu and per node lists. 5 * 6 * The allocator synchronizes using per slab locks or atomic operations 7 * and only uses a centralized lock to manage a pool of partial slabs. 8 * 9 * (C) 2007 SGI, Christoph Lameter 10 * (C) 2011 Linux Foundation, Christoph Lameter 11 */ 12 13 #include <linux/mm.h> 14 #include <linux/swap.h> /* struct reclaim_state */ 15 #include <linux/module.h> 16 #include <linux/bit_spinlock.h> 17 #include <linux/interrupt.h> 18 #include <linux/swab.h> 19 #include <linux/bitops.h> 20 #include <linux/slab.h> 21 #include "slab.h" 22 #include <linux/proc_fs.h> 23 #include <linux/seq_file.h> 24 #include <linux/kasan.h> 25 #include <linux/cpu.h> 26 #include <linux/cpuset.h> 27 #include <linux/mempolicy.h> 28 #include <linux/ctype.h> 29 #include <linux/stackdepot.h> 30 #include <linux/debugobjects.h> 31 #include <linux/kallsyms.h> 32 #include <linux/kfence.h> 33 #include <linux/memory.h> 34 #include <linux/math64.h> 35 #include <linux/fault-inject.h> 36 #include <linux/stacktrace.h> 37 #include <linux/prefetch.h> 38 #include <linux/memcontrol.h> 39 #include <linux/random.h> 40 #include <kunit/test.h> 41 42 #include <linux/debugfs.h> 43 #include <trace/events/kmem.h> 44 45 #include "internal.h" 46 47 /* 48 * Lock order: 49 * 1. slab_mutex (Global Mutex) 50 * 2. node->list_lock 51 * 3. slab_lock(page) (Only on some arches and for debugging) 52 * 53 * slab_mutex 54 * 55 * The role of the slab_mutex is to protect the list of all the slabs 56 * and to synchronize major metadata changes to slab cache structures. 57 * 58 * The slab_lock is only used for debugging and on arches that do not 59 * have the ability to do a cmpxchg_double. It only protects: 60 * A. page->freelist -> List of object free in a page 61 * B. page->inuse -> Number of objects in use 62 * C. page->objects -> Number of objects in page 63 * D. page->frozen -> frozen state 64 * 65 * If a slab is frozen then it is exempt from list management. It is not 66 * on any list except per cpu partial list. The processor that froze the 67 * slab is the one who can perform list operations on the page. Other 68 * processors may put objects onto the freelist but the processor that 69 * froze the slab is the only one that can retrieve the objects from the 70 * page's freelist. 71 * 72 * The list_lock protects the partial and full list on each node and 73 * the partial slab counter. If taken then no new slabs may be added or 74 * removed from the lists nor make the number of partial slabs be modified. 75 * (Note that the total number of slabs is an atomic value that may be 76 * modified without taking the list lock). 77 * 78 * The list_lock is a centralized lock and thus we avoid taking it as 79 * much as possible. As long as SLUB does not have to handle partial 80 * slabs, operations can continue without any centralized lock. F.e. 81 * allocating a long series of objects that fill up slabs does not require 82 * the list lock. 83 * Interrupts are disabled during allocation and deallocation in order to 84 * make the slab allocator safe to use in the context of an irq. In addition 85 * interrupts are disabled to ensure that the processor does not change 86 * while handling per_cpu slabs, due to kernel preemption. 87 * 88 * SLUB assigns one slab for allocation to each processor. 89 * Allocations only occur from these slabs called cpu slabs. 
90 * 91 * Slabs with free elements are kept on a partial list and during regular 92 * operations no list for full slabs is used. If an object in a full slab is 93 * freed then the slab will show up again on the partial lists. 94 * We track full slabs for debugging purposes though because otherwise we 95 * cannot scan all objects. 96 * 97 * Slabs are freed when they become empty. Teardown and setup is 98 * minimal so we rely on the page allocators per cpu caches for 99 * fast frees and allocs. 100 * 101 * page->frozen The slab is frozen and exempt from list processing. 102 * This means that the slab is dedicated to a purpose 103 * such as satisfying allocations for a specific 104 * processor. Objects may be freed in the slab while 105 * it is frozen but slab_free will then skip the usual 106 * list operations. It is up to the processor holding 107 * the slab to integrate the slab into the slab lists 108 * when the slab is no longer needed. 109 * 110 * One use of this flag is to mark slabs that are 111 * used for allocations. Then such a slab becomes a cpu 112 * slab. The cpu slab may be equipped with an additional 113 * freelist that allows lockless access to 114 * free objects in addition to the regular freelist 115 * that requires the slab lock. 116 * 117 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug 118 * options set. This moves slab handling out of 119 * the fast path and disables lockless freelists. 120 */ 121 122 #ifdef CONFIG_SLUB_DEBUG 123 124 #ifdef CONFIG_SLUB_DEBUG_ON 125 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 126 #else 127 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 128 #endif 129 130 static inline bool __slub_debug_enabled(void) 131 { 132 return static_branch_unlikely(&slub_debug_enabled); 133 } 134 135 #else /* CONFIG_SLUB_DEBUG */ 136 137 static inline bool __slub_debug_enabled(void) 138 { 139 return false; 140 } 141 142 #endif /* CONFIG_SLUB_DEBUG */ 143 144 static inline bool kmem_cache_debug(struct kmem_cache *s) 145 { 146 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 147 } 148 149 void *fixup_red_left(struct kmem_cache *s, void *p) 150 { 151 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 152 p += s->red_left_pad; 153 154 return p; 155 } 156 157 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 158 { 159 #ifdef CONFIG_SLUB_CPU_PARTIAL 160 return !kmem_cache_debug(s); 161 #else 162 return false; 163 #endif 164 } 165 166 /* 167 * Issues still to be resolved: 168 * 169 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 170 * 171 * - Variable sizing of the per node arrays 172 */ 173 174 /* Enable to log cmpxchg failures */ 175 #undef SLUB_DEBUG_CMPXCHG 176 177 /* 178 * Minimum number of partial slabs. These will be left on the partial 179 * lists even if they are empty. kmem_cache_shrink may reclaim them. 180 */ 181 #define MIN_PARTIAL 5 182 183 /* 184 * Maximum number of desirable partial slabs. 185 * The existence of more partial slabs makes kmem_cache_shrink 186 * sort the partial list by the number of objects in use. 187 */ 188 #define MAX_PARTIAL 10 189 190 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 191 SLAB_POISON | SLAB_STORE_USER) 192 193 /* 194 * These debug flags cannot use CMPXCHG because there might be consistency 195 * issues when checking or reading debug information 196 */ 197 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 198 SLAB_TRACE) 199 200 201 /* 202 * Debugging flags that require metadata to be stored in the slab. 
These get 203 * disabled when slub_debug=O is used and a cache's min order increases with 204 * metadata. 205 */ 206 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 207 208 #define OO_SHIFT 16 209 #define OO_MASK ((1 << OO_SHIFT) - 1) 210 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */ 211 212 /* Internal SLUB flags */ 213 /* Poison object */ 214 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) 215 /* Use cmpxchg_double */ 216 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) 217 218 /* 219 * Tracking user of a slab. 220 */ 221 #define TRACK_ADDRS_COUNT 16 222 struct track { 223 unsigned long addr; /* Called from address */ 224 #ifdef CONFIG_STACKDEPOT 225 depot_stack_handle_t handle; 226 #endif 227 int cpu; /* Was running on cpu */ 228 int pid; /* Pid context */ 229 unsigned long when; /* When did the operation occur */ 230 }; 231 232 enum track_item { TRACK_ALLOC, TRACK_FREE }; 233 234 #ifdef CONFIG_SYSFS 235 static int sysfs_slab_add(struct kmem_cache *); 236 static int sysfs_slab_alias(struct kmem_cache *, const char *); 237 #else 238 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } 239 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) 240 { return 0; } 241 #endif 242 243 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG) 244 static void debugfs_slab_add(struct kmem_cache *); 245 #else 246 static inline void debugfs_slab_add(struct kmem_cache *s) { } 247 #endif 248 249 static inline void stat(const struct kmem_cache *s, enum stat_item si) 250 { 251 #ifdef CONFIG_SLUB_STATS 252 /* 253 * The rmw is racy on a preemptible kernel but this is acceptable, so 254 * avoid this_cpu_add()'s irq-disable overhead. 255 */ 256 raw_cpu_inc(s->cpu_slab->stat[si]); 257 #endif 258 } 259 260 /* 261 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated. 262 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily 263 * differ during memory hotplug/hotremove operations. 264 * Protected by slab_mutex. 265 */ 266 static nodemask_t slab_nodes; 267 268 /******************************************************************** 269 * Core slab cache functions 270 *******************************************************************/ 271 272 /* 273 * Returns freelist pointer (ptr). With hardening, this is obfuscated 274 * with an XOR of the address where the pointer is held and a per-cache 275 * random number. 276 */ 277 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, 278 unsigned long ptr_addr) 279 { 280 #ifdef CONFIG_SLAB_FREELIST_HARDENED 281 /* 282 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged. 283 * Normally, this doesn't cause any issues, as both set_freepointer() 284 * and get_freepointer() are called with a pointer with the same tag. 285 * However, there are some issues with CONFIG_SLUB_DEBUG code. For 286 * example, when __free_slub() iterates over objects in a cache, it 287 * passes untagged pointers to check_object(). check_object() in turns 288 * calls get_freepointer() with an untagged pointer, which causes the 289 * freepointer to be restored incorrectly. 290 */ 291 return (void *)((unsigned long)ptr ^ s->random ^ 292 swab((unsigned long)kasan_reset_tag((void *)ptr_addr))); 293 #else 294 return ptr; 295 #endif 296 } 297 298 /* Returns the freelist pointer recorded at location ptr_addr. 
*/ 299 static inline void *freelist_dereference(const struct kmem_cache *s, 300 void *ptr_addr) 301 { 302 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), 303 (unsigned long)ptr_addr); 304 } 305 306 static inline void *get_freepointer(struct kmem_cache *s, void *object) 307 { 308 object = kasan_reset_tag(object); 309 return freelist_dereference(s, object + s->offset); 310 } 311 312 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 313 { 314 prefetch(object + s->offset); 315 } 316 317 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 318 { 319 unsigned long freepointer_addr; 320 void *p; 321 322 if (!debug_pagealloc_enabled_static()) 323 return get_freepointer(s, object); 324 325 object = kasan_reset_tag(object); 326 freepointer_addr = (unsigned long)object + s->offset; 327 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p)); 328 return freelist_ptr(s, p, freepointer_addr); 329 } 330 331 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 332 { 333 unsigned long freeptr_addr = (unsigned long)object + s->offset; 334 335 #ifdef CONFIG_SLAB_FREELIST_HARDENED 336 BUG_ON(object == fp); /* naive detection of double free or corruption */ 337 #endif 338 339 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 340 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); 341 } 342 343 /* Loop over all objects in a slab */ 344 #define for_each_object(__p, __s, __addr, __objects) \ 345 for (__p = fixup_red_left(__s, __addr); \ 346 __p < (__addr) + (__objects) * (__s)->size; \ 347 __p += (__s)->size) 348 349 static inline unsigned int order_objects(unsigned int order, unsigned int size) 350 { 351 return ((unsigned int)PAGE_SIZE << order) / size; 352 } 353 354 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 355 unsigned int size) 356 { 357 struct kmem_cache_order_objects x = { 358 (order << OO_SHIFT) + order_objects(order, size) 359 }; 360 361 return x; 362 } 363 364 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 365 { 366 return x.x >> OO_SHIFT; 367 } 368 369 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 370 { 371 return x.x & OO_MASK; 372 } 373 374 /* 375 * Per slab locking using the pagelock 376 */ 377 static __always_inline void slab_lock(struct page *page) 378 { 379 VM_BUG_ON_PAGE(PageTail(page), page); 380 bit_spin_lock(PG_locked, &page->flags); 381 } 382 383 static __always_inline void slab_unlock(struct page *page) 384 { 385 VM_BUG_ON_PAGE(PageTail(page), page); 386 __bit_spin_unlock(PG_locked, &page->flags); 387 } 388 389 /* Interrupts must be disabled (for the fallback code to work right) */ 390 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 391 void *freelist_old, unsigned long counters_old, 392 void *freelist_new, unsigned long counters_new, 393 const char *n) 394 { 395 VM_BUG_ON(!irqs_disabled()); 396 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 397 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 398 if (s->flags & __CMPXCHG_DOUBLE) { 399 if (cmpxchg_double(&page->freelist, &page->counters, 400 freelist_old, counters_old, 401 freelist_new, counters_new)) 402 return true; 403 } else 404 #endif 405 { 406 slab_lock(page); 407 if (page->freelist == freelist_old && 408 page->counters == counters_old) { 409 page->freelist = freelist_new; 410 page->counters = counters_new; 411 slab_unlock(page); 412 return true; 413 } 414 slab_unlock(page); 415 } 416 417 cpu_relax(); 418 
stat(s, CMPXCHG_DOUBLE_FAIL); 419 420 #ifdef SLUB_DEBUG_CMPXCHG 421 pr_info("%s %s: cmpxchg double redo ", n, s->name); 422 #endif 423 424 return false; 425 } 426 427 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 428 void *freelist_old, unsigned long counters_old, 429 void *freelist_new, unsigned long counters_new, 430 const char *n) 431 { 432 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 433 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 434 if (s->flags & __CMPXCHG_DOUBLE) { 435 if (cmpxchg_double(&page->freelist, &page->counters, 436 freelist_old, counters_old, 437 freelist_new, counters_new)) 438 return true; 439 } else 440 #endif 441 { 442 unsigned long flags; 443 444 local_irq_save(flags); 445 slab_lock(page); 446 if (page->freelist == freelist_old && 447 page->counters == counters_old) { 448 page->freelist = freelist_new; 449 page->counters = counters_new; 450 slab_unlock(page); 451 local_irq_restore(flags); 452 return true; 453 } 454 slab_unlock(page); 455 local_irq_restore(flags); 456 } 457 458 cpu_relax(); 459 stat(s, CMPXCHG_DOUBLE_FAIL); 460 461 #ifdef SLUB_DEBUG_CMPXCHG 462 pr_info("%s %s: cmpxchg double redo ", n, s->name); 463 #endif 464 465 return false; 466 } 467 468 #ifdef CONFIG_SLUB_DEBUG 469 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 470 static DEFINE_SPINLOCK(object_map_lock); 471 472 #if IS_ENABLED(CONFIG_KUNIT) 473 static bool slab_add_kunit_errors(void) 474 { 475 struct kunit_resource *resource; 476 477 if (likely(!current->kunit_test)) 478 return false; 479 480 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 481 if (!resource) 482 return false; 483 484 (*(int *)resource->data)++; 485 kunit_put_resource(resource); 486 return true; 487 } 488 #else 489 static inline bool slab_add_kunit_errors(void) { return false; } 490 #endif 491 492 /* 493 * Determine a map of object in use on a page. 494 * 495 * Node listlock must be held to guarantee that the page does 496 * not vanish from under us. 497 */ 498 static unsigned long *get_map(struct kmem_cache *s, struct page *page) 499 __acquires(&object_map_lock) 500 { 501 void *p; 502 void *addr = page_address(page); 503 504 VM_BUG_ON(!irqs_disabled()); 505 506 spin_lock(&object_map_lock); 507 508 bitmap_zero(object_map, page->objects); 509 510 for (p = page->freelist; p; p = get_freepointer(s, p)) 511 set_bit(__obj_to_index(s, addr, p), object_map); 512 513 return object_map; 514 } 515 516 static void put_map(unsigned long *map) __releases(&object_map_lock) 517 { 518 VM_BUG_ON(map != object_map); 519 spin_unlock(&object_map_lock); 520 } 521 522 static inline unsigned int size_from_object(struct kmem_cache *s) 523 { 524 if (s->flags & SLAB_RED_ZONE) 525 return s->size - s->red_left_pad; 526 527 return s->size; 528 } 529 530 static inline void *restore_red_left(struct kmem_cache *s, void *p) 531 { 532 if (s->flags & SLAB_RED_ZONE) 533 p -= s->red_left_pad; 534 535 return p; 536 } 537 538 /* 539 * Debug settings: 540 */ 541 #if defined(CONFIG_SLUB_DEBUG_ON) 542 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 543 #else 544 static slab_flags_t slub_debug; 545 #endif 546 547 static char *slub_debug_string; 548 static int disable_higher_order_debug; 549 550 /* 551 * slub is about to manipulate internal object metadata. This memory lies 552 * outside the range of the allocated object, so accessing it would normally 553 * be reported by kasan as a bounds error. metadata_access_enable() is used 554 * to tell kasan that these accesses are OK. 
555 */ 556 static inline void metadata_access_enable(void) 557 { 558 kasan_disable_current(); 559 } 560 561 static inline void metadata_access_disable(void) 562 { 563 kasan_enable_current(); 564 } 565 566 /* 567 * Object debugging 568 */ 569 570 /* Verify that a pointer has an address that is valid within a slab page */ 571 static inline int check_valid_pointer(struct kmem_cache *s, 572 struct page *page, void *object) 573 { 574 void *base; 575 576 if (!object) 577 return 1; 578 579 base = page_address(page); 580 object = kasan_reset_tag(object); 581 object = restore_red_left(s, object); 582 if (object < base || object >= base + page->objects * s->size || 583 (object - base) % s->size) { 584 return 0; 585 } 586 587 return 1; 588 } 589 590 static void print_section(char *level, char *text, u8 *addr, 591 unsigned int length) 592 { 593 metadata_access_enable(); 594 print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS, 595 16, 1, addr, length, 1); 596 metadata_access_disable(); 597 } 598 599 /* 600 * See comment in calculate_sizes(). 601 */ 602 static inline bool freeptr_outside_object(struct kmem_cache *s) 603 { 604 return s->offset >= s->inuse; 605 } 606 607 /* 608 * Return offset of the end of info block which is inuse + free pointer if 609 * not overlapping with object. 610 */ 611 static inline unsigned int get_info_end(struct kmem_cache *s) 612 { 613 if (freeptr_outside_object(s)) 614 return s->inuse + sizeof(void *); 615 else 616 return s->inuse; 617 } 618 619 static struct track *get_track(struct kmem_cache *s, void *object, 620 enum track_item alloc) 621 { 622 struct track *p; 623 624 p = object + get_info_end(s); 625 626 return kasan_reset_tag(p + alloc); 627 } 628 629 #ifdef CONFIG_STACKDEPOT 630 static depot_stack_handle_t save_stack_depot_trace(gfp_t flags) 631 { 632 unsigned long entries[TRACK_ADDRS_COUNT]; 633 depot_stack_handle_t handle; 634 unsigned int nr_entries; 635 636 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4); 637 handle = stack_depot_save(entries, nr_entries, flags); 638 return handle; 639 } 640 #endif 641 642 static void set_track(struct kmem_cache *s, void *object, 643 enum track_item alloc, unsigned long addr) 644 { 645 struct track *p = get_track(s, object, alloc); 646 647 if (addr) { 648 #ifdef CONFIG_STACKDEPOT 649 p->handle = save_stack_depot_trace(GFP_NOWAIT); 650 #endif 651 p->addr = addr; 652 p->cpu = smp_processor_id(); 653 p->pid = current->pid; 654 p->when = jiffies; 655 } else { 656 memset(p, 0, sizeof(struct track)); 657 } 658 } 659 660 static void init_tracking(struct kmem_cache *s, void *object) 661 { 662 if (!(s->flags & SLAB_STORE_USER)) 663 return; 664 665 set_track(s, object, TRACK_FREE, 0UL); 666 set_track(s, object, TRACK_ALLOC, 0UL); 667 } 668 669 static void print_track(const char *s, struct track *t, unsigned long pr_time) 670 { 671 if (!t->addr) 672 return; 673 674 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 675 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 676 #ifdef CONFIG_STACKDEPOT 677 { 678 depot_stack_handle_t handle; 679 unsigned long *entries; 680 unsigned int nr_entries; 681 682 handle = READ_ONCE(t->handle); 683 if (!handle) { 684 pr_err("object allocation/free stack trace missing\n"); 685 } else { 686 nr_entries = stack_depot_fetch(handle, &entries); 687 stack_trace_print(entries, nr_entries, 0); 688 } 689 } 690 #endif 691 } 692 693 void print_tracking(struct kmem_cache *s, void *object) 694 { 695 unsigned long pr_time = jiffies; 696 if (!(s->flags & SLAB_STORE_USER)) 697 return; 698 699 
print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); 700 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 701 } 702 703 static void print_page_info(struct page *page) 704 { 705 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n", 706 page, page->objects, page->inuse, page->freelist, 707 page->flags, &page->flags); 708 709 } 710 711 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 712 { 713 struct va_format vaf; 714 va_list args; 715 716 va_start(args, fmt); 717 vaf.fmt = fmt; 718 vaf.va = &args; 719 pr_err("=============================================================================\n"); 720 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 721 pr_err("-----------------------------------------------------------------------------\n\n"); 722 va_end(args); 723 } 724 725 __printf(2, 3) 726 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 727 { 728 struct va_format vaf; 729 va_list args; 730 731 if (slab_add_kunit_errors()) 732 return; 733 734 va_start(args, fmt); 735 vaf.fmt = fmt; 736 vaf.va = &args; 737 pr_err("FIX %s: %pV\n", s->name, &vaf); 738 va_end(args); 739 } 740 741 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, 742 void **freelist, void *nextfree) 743 { 744 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 745 !check_valid_pointer(s, page, nextfree) && freelist) { 746 object_err(s, page, *freelist, "Freechain corrupt"); 747 *freelist = NULL; 748 slab_fix(s, "Isolate corrupted freechain"); 749 return true; 750 } 751 752 return false; 753 } 754 755 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 756 { 757 unsigned int off; /* Offset of last byte */ 758 u8 *addr = page_address(page); 759 760 print_tracking(s, p); 761 762 print_page_info(page); 763 764 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 765 p, p - addr, get_freepointer(s, p)); 766 767 if (s->flags & SLAB_RED_ZONE) 768 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 769 s->red_left_pad); 770 else if (p > addr + 16) 771 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 772 773 print_section(KERN_ERR, "Object ", p, 774 min_t(unsigned int, s->object_size, PAGE_SIZE)); 775 if (s->flags & SLAB_RED_ZONE) 776 print_section(KERN_ERR, "Redzone ", p + s->object_size, 777 s->inuse - s->object_size); 778 779 off = get_info_end(s); 780 781 if (s->flags & SLAB_STORE_USER) 782 off += 2 * sizeof(struct track); 783 784 off += kasan_metadata_size(s); 785 786 if (off != size_from_object(s)) 787 /* Beginning of the filler is the free pointer */ 788 print_section(KERN_ERR, "Padding ", p + off, 789 size_from_object(s) - off); 790 791 dump_stack(); 792 } 793 794 void object_err(struct kmem_cache *s, struct page *page, 795 u8 *object, char *reason) 796 { 797 if (slab_add_kunit_errors()) 798 return; 799 800 slab_bug(s, "%s", reason); 801 print_trailer(s, page, object); 802 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 803 } 804 805 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, 806 const char *fmt, ...) 
807 { 808 va_list args; 809 char buf[100]; 810 811 if (slab_add_kunit_errors()) 812 return; 813 814 va_start(args, fmt); 815 vsnprintf(buf, sizeof(buf), fmt, args); 816 va_end(args); 817 slab_bug(s, "%s", buf); 818 print_page_info(page); 819 dump_stack(); 820 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 821 } 822 823 static void init_object(struct kmem_cache *s, void *object, u8 val) 824 { 825 u8 *p = kasan_reset_tag(object); 826 827 if (s->flags & SLAB_RED_ZONE) 828 memset(p - s->red_left_pad, val, s->red_left_pad); 829 830 if (s->flags & __OBJECT_POISON) { 831 memset(p, POISON_FREE, s->object_size - 1); 832 p[s->object_size - 1] = POISON_END; 833 } 834 835 if (s->flags & SLAB_RED_ZONE) 836 memset(p + s->object_size, val, s->inuse - s->object_size); 837 } 838 839 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 840 void *from, void *to) 841 { 842 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 843 memset(from, data, to - from); 844 } 845 846 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 847 u8 *object, char *what, 848 u8 *start, unsigned int value, unsigned int bytes) 849 { 850 u8 *fault; 851 u8 *end; 852 u8 *addr = page_address(page); 853 854 metadata_access_enable(); 855 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 856 metadata_access_disable(); 857 if (!fault) 858 return 1; 859 860 end = start + bytes; 861 while (end > fault && end[-1] == value) 862 end--; 863 864 if (slab_add_kunit_errors()) 865 goto skip_bug_print; 866 867 slab_bug(s, "%s overwritten", what); 868 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 869 fault, end - 1, fault - addr, 870 fault[0], value); 871 print_trailer(s, page, object); 872 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 873 874 skip_bug_print: 875 restore_bytes(s, what, value, fault, end); 876 return 0; 877 } 878 879 /* 880 * Object layout: 881 * 882 * object address 883 * Bytes of the object to be managed. 884 * If the freepointer may overlay the object then the free 885 * pointer is at the middle of the object. 886 * 887 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 888 * 0xa5 (POISON_END) 889 * 890 * object + s->object_size 891 * Padding to reach word boundary. This is also used for Redzoning. 892 * Padding is extended by another word if Redzoning is enabled and 893 * object_size == inuse. 894 * 895 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 896 * 0xcc (RED_ACTIVE) for objects in use. 897 * 898 * object + s->inuse 899 * Meta data starts here. 900 * 901 * A. Free pointer (if we cannot overwrite object on free) 902 * B. Tracking data for SLAB_STORE_USER 903 * C. Padding to reach required alignment boundary or at minimum 904 * one word if debugging is on to be able to detect writes 905 * before the word boundary. 906 * 907 * Padding is done using 0x5a (POISON_INUSE) 908 * 909 * object + s->size 910 * Nothing is used beyond s->size. 911 * 912 * If slabcaches are merged then the object_size and inuse boundaries are mostly 913 * ignored. And therefore no slab options that rely on these boundaries 914 * may be used with merged slabcaches. 
915 */ 916 917 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 918 { 919 unsigned long off = get_info_end(s); /* The end of info */ 920 921 if (s->flags & SLAB_STORE_USER) 922 /* We also have user information there */ 923 off += 2 * sizeof(struct track); 924 925 off += kasan_metadata_size(s); 926 927 if (size_from_object(s) == off) 928 return 1; 929 930 return check_bytes_and_report(s, page, p, "Object padding", 931 p + off, POISON_INUSE, size_from_object(s) - off); 932 } 933 934 /* Check the pad bytes at the end of a slab page */ 935 static int slab_pad_check(struct kmem_cache *s, struct page *page) 936 { 937 u8 *start; 938 u8 *fault; 939 u8 *end; 940 u8 *pad; 941 int length; 942 int remainder; 943 944 if (!(s->flags & SLAB_POISON)) 945 return 1; 946 947 start = page_address(page); 948 length = page_size(page); 949 end = start + length; 950 remainder = length % s->size; 951 if (!remainder) 952 return 1; 953 954 pad = end - remainder; 955 metadata_access_enable(); 956 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 957 metadata_access_disable(); 958 if (!fault) 959 return 1; 960 while (end > fault && end[-1] == POISON_INUSE) 961 end--; 962 963 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", 964 fault, end - 1, fault - start); 965 print_section(KERN_ERR, "Padding ", pad, remainder); 966 967 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 968 return 0; 969 } 970 971 static int check_object(struct kmem_cache *s, struct page *page, 972 void *object, u8 val) 973 { 974 u8 *p = object; 975 u8 *endobject = object + s->object_size; 976 977 if (s->flags & SLAB_RED_ZONE) { 978 if (!check_bytes_and_report(s, page, object, "Left Redzone", 979 object - s->red_left_pad, val, s->red_left_pad)) 980 return 0; 981 982 if (!check_bytes_and_report(s, page, object, "Right Redzone", 983 endobject, val, s->inuse - s->object_size)) 984 return 0; 985 } else { 986 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 987 check_bytes_and_report(s, page, p, "Alignment padding", 988 endobject, POISON_INUSE, 989 s->inuse - s->object_size); 990 } 991 } 992 993 if (s->flags & SLAB_POISON) { 994 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 995 (!check_bytes_and_report(s, page, p, "Poison", p, 996 POISON_FREE, s->object_size - 1) || 997 !check_bytes_and_report(s, page, p, "End Poison", 998 p + s->object_size - 1, POISON_END, 1))) 999 return 0; 1000 /* 1001 * check_pad_bytes cleans up on its own. 1002 */ 1003 check_pad_bytes(s, page, p); 1004 } 1005 1006 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1007 /* 1008 * Object and freepointer overlap. Cannot check 1009 * freepointer while object is allocated. 1010 */ 1011 return 1; 1012 1013 /* Check free pointer validity */ 1014 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 1015 object_err(s, page, p, "Freepointer corrupt"); 1016 /* 1017 * No choice but to zap it and thus lose the remainder 1018 * of the free objects in this slab. May cause 1019 * another error because the object count is now wrong. 
1020 */ 1021 set_freepointer(s, p, NULL); 1022 return 0; 1023 } 1024 return 1; 1025 } 1026 1027 static int check_slab(struct kmem_cache *s, struct page *page) 1028 { 1029 int maxobj; 1030 1031 VM_BUG_ON(!irqs_disabled()); 1032 1033 if (!PageSlab(page)) { 1034 slab_err(s, page, "Not a valid slab page"); 1035 return 0; 1036 } 1037 1038 maxobj = order_objects(compound_order(page), s->size); 1039 if (page->objects > maxobj) { 1040 slab_err(s, page, "objects %u > max %u", 1041 page->objects, maxobj); 1042 return 0; 1043 } 1044 if (page->inuse > page->objects) { 1045 slab_err(s, page, "inuse %u > max %u", 1046 page->inuse, page->objects); 1047 return 0; 1048 } 1049 /* Slab_pad_check fixes things up after itself */ 1050 slab_pad_check(s, page); 1051 return 1; 1052 } 1053 1054 /* 1055 * Determine if a certain object on a page is on the freelist. Must hold the 1056 * slab lock to guarantee that the chains are in a consistent state. 1057 */ 1058 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 1059 { 1060 int nr = 0; 1061 void *fp; 1062 void *object = NULL; 1063 int max_objects; 1064 1065 fp = page->freelist; 1066 while (fp && nr <= page->objects) { 1067 if (fp == search) 1068 return 1; 1069 if (!check_valid_pointer(s, page, fp)) { 1070 if (object) { 1071 object_err(s, page, object, 1072 "Freechain corrupt"); 1073 set_freepointer(s, object, NULL); 1074 } else { 1075 slab_err(s, page, "Freepointer corrupt"); 1076 page->freelist = NULL; 1077 page->inuse = page->objects; 1078 slab_fix(s, "Freelist cleared"); 1079 return 0; 1080 } 1081 break; 1082 } 1083 object = fp; 1084 fp = get_freepointer(s, object); 1085 nr++; 1086 } 1087 1088 max_objects = order_objects(compound_order(page), s->size); 1089 if (max_objects > MAX_OBJS_PER_PAGE) 1090 max_objects = MAX_OBJS_PER_PAGE; 1091 1092 if (page->objects != max_objects) { 1093 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", 1094 page->objects, max_objects); 1095 page->objects = max_objects; 1096 slab_fix(s, "Number of objects adjusted"); 1097 } 1098 if (page->inuse != page->objects - nr) { 1099 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", 1100 page->inuse, page->objects - nr); 1101 page->inuse = page->objects - nr; 1102 slab_fix(s, "Object count adjusted"); 1103 } 1104 return search == NULL; 1105 } 1106 1107 static void trace(struct kmem_cache *s, struct page *page, void *object, 1108 int alloc) 1109 { 1110 if (s->flags & SLAB_TRACE) { 1111 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1112 s->name, 1113 alloc ? "alloc" : "free", 1114 object, page->inuse, 1115 page->freelist); 1116 1117 if (!alloc) 1118 print_section(KERN_INFO, "Object ", (void *)object, 1119 s->object_size); 1120 1121 dump_stack(); 1122 } 1123 } 1124 1125 /* 1126 * Tracking of fully allocated slabs for debugging purposes. 
1127 */ 1128 static void add_full(struct kmem_cache *s, 1129 struct kmem_cache_node *n, struct page *page) 1130 { 1131 if (!(s->flags & SLAB_STORE_USER)) 1132 return; 1133 1134 lockdep_assert_held(&n->list_lock); 1135 list_add(&page->slab_list, &n->full); 1136 } 1137 1138 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1139 { 1140 if (!(s->flags & SLAB_STORE_USER)) 1141 return; 1142 1143 lockdep_assert_held(&n->list_lock); 1144 list_del(&page->slab_list); 1145 } 1146 1147 /* Tracking of the number of slabs for debugging purposes */ 1148 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1149 { 1150 struct kmem_cache_node *n = get_node(s, node); 1151 1152 return atomic_long_read(&n->nr_slabs); 1153 } 1154 1155 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1156 { 1157 return atomic_long_read(&n->nr_slabs); 1158 } 1159 1160 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1161 { 1162 struct kmem_cache_node *n = get_node(s, node); 1163 1164 /* 1165 * May be called early in order to allocate a slab for the 1166 * kmem_cache_node structure. Solve the chicken-egg 1167 * dilemma by deferring the increment of the count during 1168 * bootstrap (see early_kmem_cache_node_alloc). 1169 */ 1170 if (likely(n)) { 1171 atomic_long_inc(&n->nr_slabs); 1172 atomic_long_add(objects, &n->total_objects); 1173 } 1174 } 1175 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1176 { 1177 struct kmem_cache_node *n = get_node(s, node); 1178 1179 atomic_long_dec(&n->nr_slabs); 1180 atomic_long_sub(objects, &n->total_objects); 1181 } 1182 1183 /* Object debug checks for alloc/free paths */ 1184 static void setup_object_debug(struct kmem_cache *s, struct page *page, 1185 void *object) 1186 { 1187 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1188 return; 1189 1190 init_object(s, object, SLUB_RED_INACTIVE); 1191 init_tracking(s, object); 1192 } 1193 1194 static 1195 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) 1196 { 1197 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1198 return; 1199 1200 metadata_access_enable(); 1201 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page)); 1202 metadata_access_disable(); 1203 } 1204 1205 static inline int alloc_consistency_checks(struct kmem_cache *s, 1206 struct page *page, void *object) 1207 { 1208 if (!check_slab(s, page)) 1209 return 0; 1210 1211 if (!check_valid_pointer(s, page, object)) { 1212 object_err(s, page, object, "Freelist Pointer check fails"); 1213 return 0; 1214 } 1215 1216 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 1217 return 0; 1218 1219 return 1; 1220 } 1221 1222 static noinline int alloc_debug_processing(struct kmem_cache *s, 1223 struct page *page, 1224 void *object, unsigned long addr) 1225 { 1226 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1227 if (!alloc_consistency_checks(s, page, object)) 1228 goto bad; 1229 } 1230 1231 /* Success perform special debug activities for allocs */ 1232 if (s->flags & SLAB_STORE_USER) 1233 set_track(s, object, TRACK_ALLOC, addr); 1234 trace(s, page, object, 1); 1235 init_object(s, object, SLUB_RED_ACTIVE); 1236 return 1; 1237 1238 bad: 1239 if (PageSlab(page)) { 1240 /* 1241 * If this is a slab page then lets do the best we can 1242 * to avoid issues in the future. Marking all objects 1243 * as used avoids touching the remaining objects. 
1244 */ 1245 slab_fix(s, "Marking all objects used"); 1246 page->inuse = page->objects; 1247 page->freelist = NULL; 1248 } 1249 return 0; 1250 } 1251 1252 static inline int free_consistency_checks(struct kmem_cache *s, 1253 struct page *page, void *object, unsigned long addr) 1254 { 1255 if (!check_valid_pointer(s, page, object)) { 1256 slab_err(s, page, "Invalid object pointer 0x%p", object); 1257 return 0; 1258 } 1259 1260 if (on_freelist(s, page, object)) { 1261 object_err(s, page, object, "Object already free"); 1262 return 0; 1263 } 1264 1265 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1266 return 0; 1267 1268 if (unlikely(s != page->slab_cache)) { 1269 if (!PageSlab(page)) { 1270 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", 1271 object); 1272 } else if (!page->slab_cache) { 1273 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1274 object); 1275 dump_stack(); 1276 } else 1277 object_err(s, page, object, 1278 "page slab pointer corrupt."); 1279 return 0; 1280 } 1281 return 1; 1282 } 1283 1284 /* Supports checking bulk free of a constructed freelist */ 1285 static noinline int free_debug_processing( 1286 struct kmem_cache *s, struct page *page, 1287 void *head, void *tail, int bulk_cnt, 1288 unsigned long addr) 1289 { 1290 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1291 void *object = head; 1292 int cnt = 0; 1293 unsigned long flags; 1294 int ret = 0; 1295 1296 spin_lock_irqsave(&n->list_lock, flags); 1297 slab_lock(page); 1298 1299 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1300 if (!check_slab(s, page)) 1301 goto out; 1302 } 1303 1304 next_object: 1305 cnt++; 1306 1307 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1308 if (!free_consistency_checks(s, page, object, addr)) 1309 goto out; 1310 } 1311 1312 if (s->flags & SLAB_STORE_USER) 1313 set_track(s, object, TRACK_FREE, addr); 1314 trace(s, page, object, 0); 1315 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 1316 init_object(s, object, SLUB_RED_INACTIVE); 1317 1318 /* Reached end of constructed freelist yet? */ 1319 if (object != tail) { 1320 object = get_freepointer(s, object); 1321 goto next_object; 1322 } 1323 ret = 1; 1324 1325 out: 1326 if (cnt != bulk_cnt) 1327 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", 1328 bulk_cnt, cnt); 1329 1330 slab_unlock(page); 1331 spin_unlock_irqrestore(&n->list_lock, flags); 1332 if (!ret) 1333 slab_fix(s, "Object at 0x%p not freed", object); 1334 return ret; 1335 } 1336 1337 /* 1338 * Parse a block of slub_debug options. Blocks are delimited by ';' 1339 * 1340 * @str: start of block 1341 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1342 * @slabs: return start of list of slabs, or NULL when there's no list 1343 * @init: assume this is initial parsing and not per-kmem-create parsing 1344 * 1345 * returns the start of next block if there's any, or NULL 1346 */ 1347 static char * 1348 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1349 { 1350 bool higher_order_disable = false; 1351 1352 /* Skip any completely empty blocks */ 1353 while (*str && *str == ';') 1354 str++; 1355 1356 if (*str == ',') { 1357 /* 1358 * No options but restriction on slabs. This means full 1359 * debugging for slabs matching a pattern. 
1360 */ 1361 *flags = DEBUG_DEFAULT_FLAGS; 1362 goto check_slabs; 1363 } 1364 *flags = 0; 1365 1366 /* Determine which debug features should be switched on */ 1367 for (; *str && *str != ',' && *str != ';'; str++) { 1368 switch (tolower(*str)) { 1369 case '-': 1370 *flags = 0; 1371 break; 1372 case 'f': 1373 *flags |= SLAB_CONSISTENCY_CHECKS; 1374 break; 1375 case 'z': 1376 *flags |= SLAB_RED_ZONE; 1377 break; 1378 case 'p': 1379 *flags |= SLAB_POISON; 1380 break; 1381 case 'u': 1382 *flags |= SLAB_STORE_USER; 1383 break; 1384 case 't': 1385 *flags |= SLAB_TRACE; 1386 break; 1387 case 'a': 1388 *flags |= SLAB_FAILSLAB; 1389 break; 1390 case 'o': 1391 /* 1392 * Avoid enabling debugging on caches if its minimum 1393 * order would increase as a result. 1394 */ 1395 higher_order_disable = true; 1396 break; 1397 default: 1398 if (init) 1399 pr_err("slub_debug option '%c' unknown. skipped\n", *str); 1400 } 1401 } 1402 check_slabs: 1403 if (*str == ',') 1404 *slabs = ++str; 1405 else 1406 *slabs = NULL; 1407 1408 /* Skip over the slab list */ 1409 while (*str && *str != ';') 1410 str++; 1411 1412 /* Skip any completely empty blocks */ 1413 while (*str && *str == ';') 1414 str++; 1415 1416 if (init && higher_order_disable) 1417 disable_higher_order_debug = 1; 1418 1419 if (*str) 1420 return str; 1421 else 1422 return NULL; 1423 } 1424 1425 static int __init setup_slub_debug(char *str) 1426 { 1427 slab_flags_t flags; 1428 char *saved_str; 1429 char *slab_list; 1430 bool global_slub_debug_changed = false; 1431 bool slab_list_specified = false; 1432 1433 slub_debug = DEBUG_DEFAULT_FLAGS; 1434 if (*str++ != '=' || !*str) 1435 /* 1436 * No options specified. Switch on full debugging. 1437 */ 1438 goto out; 1439 1440 saved_str = str; 1441 while (str) { 1442 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1443 1444 if (!slab_list) { 1445 slub_debug = flags; 1446 global_slub_debug_changed = true; 1447 } else { 1448 slab_list_specified = true; 1449 } 1450 } 1451 1452 /* 1453 * For backwards compatibility, a single list of flags with list of 1454 * slabs means debugging is only enabled for those slabs, so the global 1455 * slub_debug should be 0. We can extended that to multiple lists as 1456 * long as there is no option specifying flags without a slab list. 1457 */ 1458 if (slab_list_specified) { 1459 if (!global_slub_debug_changed) 1460 slub_debug = 0; 1461 slub_debug_string = saved_str; 1462 } 1463 out: 1464 if (slub_debug != 0 || slub_debug_string) 1465 static_branch_enable(&slub_debug_enabled); 1466 else 1467 static_branch_disable(&slub_debug_enabled); 1468 if ((static_branch_unlikely(&init_on_alloc) || 1469 static_branch_unlikely(&init_on_free)) && 1470 (slub_debug & SLAB_POISON)) 1471 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1472 return 1; 1473 } 1474 1475 __setup("slub_debug", setup_slub_debug); 1476 1477 /* 1478 * kmem_cache_flags - apply debugging options to the cache 1479 * @object_size: the size of an object without meta data 1480 * @flags: flags to set 1481 * @name: name of the cache 1482 * 1483 * Debug option(s) are applied to @flags. In addition to the debug 1484 * option(s), if a slab name (or multiple) is specified i.e. 1485 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1486 * then only the select slabs will receive the debug option(s). 
1487 */ 1488 slab_flags_t kmem_cache_flags(unsigned int object_size, 1489 slab_flags_t flags, const char *name) 1490 { 1491 char *iter; 1492 size_t len; 1493 char *next_block; 1494 slab_flags_t block_flags; 1495 slab_flags_t slub_debug_local = slub_debug; 1496 1497 /* 1498 * If the slab cache is for debugging (e.g. kmemleak) then 1499 * don't store user (stack trace) information by default, 1500 * but let the user enable it via the command line below. 1501 */ 1502 if (flags & SLAB_NOLEAKTRACE) 1503 slub_debug_local &= ~SLAB_STORE_USER; 1504 1505 len = strlen(name); 1506 next_block = slub_debug_string; 1507 /* Go through all blocks of debug options, see if any matches our slab's name */ 1508 while (next_block) { 1509 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1510 if (!iter) 1511 continue; 1512 /* Found a block that has a slab list, search it */ 1513 while (*iter) { 1514 char *end, *glob; 1515 size_t cmplen; 1516 1517 end = strchrnul(iter, ','); 1518 if (next_block && next_block < end) 1519 end = next_block - 1; 1520 1521 glob = strnchr(iter, end - iter, '*'); 1522 if (glob) 1523 cmplen = glob - iter; 1524 else 1525 cmplen = max_t(size_t, len, (end - iter)); 1526 1527 if (!strncmp(name, iter, cmplen)) { 1528 flags |= block_flags; 1529 return flags; 1530 } 1531 1532 if (!*end || *end == ';') 1533 break; 1534 iter = end + 1; 1535 } 1536 } 1537 1538 return flags | slub_debug_local; 1539 } 1540 #else /* !CONFIG_SLUB_DEBUG */ 1541 static inline void setup_object_debug(struct kmem_cache *s, 1542 struct page *page, void *object) {} 1543 static inline 1544 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} 1545 1546 static inline int alloc_debug_processing(struct kmem_cache *s, 1547 struct page *page, void *object, unsigned long addr) { return 0; } 1548 1549 static inline int free_debug_processing( 1550 struct kmem_cache *s, struct page *page, 1551 void *head, void *tail, int bulk_cnt, 1552 unsigned long addr) { return 0; } 1553 1554 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1555 { return 1; } 1556 static inline int check_object(struct kmem_cache *s, struct page *page, 1557 void *object, u8 val) { return 1; } 1558 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1559 struct page *page) {} 1560 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1561 struct page *page) {} 1562 slab_flags_t kmem_cache_flags(unsigned int object_size, 1563 slab_flags_t flags, const char *name) 1564 { 1565 return flags; 1566 } 1567 #define slub_debug 0 1568 1569 #define disable_higher_order_debug 0 1570 1571 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1572 { return 0; } 1573 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1574 { return 0; } 1575 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1576 int objects) {} 1577 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1578 int objects) {} 1579 1580 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, 1581 void **freelist, void *nextfree) 1582 { 1583 return false; 1584 } 1585 #endif /* CONFIG_SLUB_DEBUG */ 1586 1587 /* 1588 * Hooks for other subsystems that check memory allocations. In a typical 1589 * production configuration these hooks all should produce no code at all. 
1590 */ 1591 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1592 { 1593 ptr = kasan_kmalloc_large(ptr, size, flags); 1594 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 1595 kmemleak_alloc(ptr, size, 1, flags); 1596 return ptr; 1597 } 1598 1599 static __always_inline void kfree_hook(void *x) 1600 { 1601 kmemleak_free(x); 1602 kasan_kfree_large(x); 1603 } 1604 1605 static __always_inline bool slab_free_hook(struct kmem_cache *s, 1606 void *x, bool init) 1607 { 1608 kmemleak_free_recursive(x, s->flags); 1609 1610 /* 1611 * Trouble is that we may no longer disable interrupts in the fast path 1612 * So in order to make the debug calls that expect irqs to be 1613 * disabled we need to disable interrupts temporarily. 1614 */ 1615 #ifdef CONFIG_LOCKDEP 1616 { 1617 unsigned long flags; 1618 1619 local_irq_save(flags); 1620 debug_check_no_locks_freed(x, s->object_size); 1621 local_irq_restore(flags); 1622 } 1623 #endif 1624 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1625 debug_check_no_obj_freed(x, s->object_size); 1626 1627 /* Use KCSAN to help debug racy use-after-free. */ 1628 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 1629 __kcsan_check_access(x, s->object_size, 1630 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 1631 1632 /* 1633 * As memory initialization might be integrated into KASAN, 1634 * kasan_slab_free and initialization memset's must be 1635 * kept together to avoid discrepancies in behavior. 1636 * 1637 * The initialization memset's clear the object and the metadata, 1638 * but don't touch the SLAB redzone. 1639 */ 1640 if (init) { 1641 int rsize; 1642 1643 if (!kasan_has_integrated_init()) 1644 memset(kasan_reset_tag(x), 0, s->object_size); 1645 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 1646 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 1647 s->size - s->inuse - rsize); 1648 } 1649 /* KASAN might put x into memory quarantine, delaying its reuse. */ 1650 return kasan_slab_free(s, x, init); 1651 } 1652 1653 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 1654 void **head, void **tail) 1655 { 1656 1657 void *object; 1658 void *next = *head; 1659 void *old_tail = *tail ? 
*tail : *head; 1660 1661 if (is_kfence_address(next)) { 1662 slab_free_hook(s, next, false); 1663 return true; 1664 } 1665 1666 /* Head and tail of the reconstructed freelist */ 1667 *head = NULL; 1668 *tail = NULL; 1669 1670 do { 1671 object = next; 1672 next = get_freepointer(s, object); 1673 1674 /* If object's reuse doesn't have to be delayed */ 1675 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { 1676 /* Move object to the new freelist */ 1677 set_freepointer(s, object, *head); 1678 *head = object; 1679 if (!*tail) 1680 *tail = object; 1681 } 1682 } while (object != old_tail); 1683 1684 if (*head == *tail) 1685 *tail = NULL; 1686 1687 return *head != NULL; 1688 } 1689 1690 static void *setup_object(struct kmem_cache *s, struct page *page, 1691 void *object) 1692 { 1693 setup_object_debug(s, page, object); 1694 object = kasan_init_slab_obj(s, object); 1695 if (unlikely(s->ctor)) { 1696 kasan_unpoison_object_data(s, object); 1697 s->ctor(object); 1698 kasan_poison_object_data(s, object); 1699 } 1700 return object; 1701 } 1702 1703 /* 1704 * Slab allocation and freeing 1705 */ 1706 static inline struct page *alloc_slab_page(struct kmem_cache *s, 1707 gfp_t flags, int node, struct kmem_cache_order_objects oo) 1708 { 1709 struct page *page; 1710 unsigned int order = oo_order(oo); 1711 1712 if (node == NUMA_NO_NODE) 1713 page = alloc_pages(flags, order); 1714 else 1715 page = __alloc_pages_node(node, flags, order); 1716 1717 return page; 1718 } 1719 1720 #ifdef CONFIG_SLAB_FREELIST_RANDOM 1721 /* Pre-initialize the random sequence cache */ 1722 static int init_cache_random_seq(struct kmem_cache *s) 1723 { 1724 unsigned int count = oo_objects(s->oo); 1725 int err; 1726 1727 /* Bailout if already initialised */ 1728 if (s->random_seq) 1729 return 0; 1730 1731 err = cache_random_seq_create(s, count, GFP_KERNEL); 1732 if (err) { 1733 pr_err("SLUB: Unable to initialize free list for %s\n", 1734 s->name); 1735 return err; 1736 } 1737 1738 /* Transform to an offset on the set of pages */ 1739 if (s->random_seq) { 1740 unsigned int i; 1741 1742 for (i = 0; i < count; i++) 1743 s->random_seq[i] *= s->size; 1744 } 1745 return 0; 1746 } 1747 1748 /* Initialize each random sequence freelist per cache */ 1749 static void __init init_freelist_randomization(void) 1750 { 1751 struct kmem_cache *s; 1752 1753 mutex_lock(&slab_mutex); 1754 1755 list_for_each_entry(s, &slab_caches, list) 1756 init_cache_random_seq(s); 1757 1758 mutex_unlock(&slab_mutex); 1759 } 1760 1761 /* Get the next entry on the pre-computed freelist randomized */ 1762 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, 1763 unsigned long *pos, void *start, 1764 unsigned long page_limit, 1765 unsigned long freelist_count) 1766 { 1767 unsigned int idx; 1768 1769 /* 1770 * If the target page allocation failed, the number of objects on the 1771 * page might be smaller than the usual size defined by the cache. 
1772 */ 1773 do { 1774 idx = s->random_seq[*pos]; 1775 *pos += 1; 1776 if (*pos >= freelist_count) 1777 *pos = 0; 1778 } while (unlikely(idx >= page_limit)); 1779 1780 return (char *)start + idx; 1781 } 1782 1783 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 1784 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1785 { 1786 void *start; 1787 void *cur; 1788 void *next; 1789 unsigned long idx, pos, page_limit, freelist_count; 1790 1791 if (page->objects < 2 || !s->random_seq) 1792 return false; 1793 1794 freelist_count = oo_objects(s->oo); 1795 pos = get_random_int() % freelist_count; 1796 1797 page_limit = page->objects * s->size; 1798 start = fixup_red_left(s, page_address(page)); 1799 1800 /* First entry is used as the base of the freelist */ 1801 cur = next_freelist_entry(s, page, &pos, start, page_limit, 1802 freelist_count); 1803 cur = setup_object(s, page, cur); 1804 page->freelist = cur; 1805 1806 for (idx = 1; idx < page->objects; idx++) { 1807 next = next_freelist_entry(s, page, &pos, start, page_limit, 1808 freelist_count); 1809 next = setup_object(s, page, next); 1810 set_freepointer(s, cur, next); 1811 cur = next; 1812 } 1813 set_freepointer(s, cur, NULL); 1814 1815 return true; 1816 } 1817 #else 1818 static inline int init_cache_random_seq(struct kmem_cache *s) 1819 { 1820 return 0; 1821 } 1822 static inline void init_freelist_randomization(void) { } 1823 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1824 { 1825 return false; 1826 } 1827 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 1828 1829 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1830 { 1831 struct page *page; 1832 struct kmem_cache_order_objects oo = s->oo; 1833 gfp_t alloc_gfp; 1834 void *start, *p, *next; 1835 int idx; 1836 bool shuffle; 1837 1838 flags &= gfp_allowed_mask; 1839 1840 if (gfpflags_allow_blocking(flags)) 1841 local_irq_enable(); 1842 1843 flags |= s->allocflags; 1844 1845 /* 1846 * Let the initial higher-order allocation fail under memory pressure 1847 * so we fall-back to the minimum order allocation. 1848 */ 1849 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1850 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 1851 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 1852 1853 page = alloc_slab_page(s, alloc_gfp, node, oo); 1854 if (unlikely(!page)) { 1855 oo = s->min; 1856 alloc_gfp = flags; 1857 /* 1858 * Allocation may have failed due to fragmentation. 
1859 * Try a lower order alloc if possible 1860 */ 1861 page = alloc_slab_page(s, alloc_gfp, node, oo); 1862 if (unlikely(!page)) 1863 goto out; 1864 stat(s, ORDER_FALLBACK); 1865 } 1866 1867 page->objects = oo_objects(oo); 1868 1869 account_slab_page(page, oo_order(oo), s, flags); 1870 1871 page->slab_cache = s; 1872 __SetPageSlab(page); 1873 if (page_is_pfmemalloc(page)) 1874 SetPageSlabPfmemalloc(page); 1875 1876 kasan_poison_slab(page); 1877 1878 start = page_address(page); 1879 1880 setup_page_debug(s, page, start); 1881 1882 shuffle = shuffle_freelist(s, page); 1883 1884 if (!shuffle) { 1885 start = fixup_red_left(s, start); 1886 start = setup_object(s, page, start); 1887 page->freelist = start; 1888 for (idx = 0, p = start; idx < page->objects - 1; idx++) { 1889 next = p + s->size; 1890 next = setup_object(s, page, next); 1891 set_freepointer(s, p, next); 1892 p = next; 1893 } 1894 set_freepointer(s, p, NULL); 1895 } 1896 1897 page->inuse = page->objects; 1898 page->frozen = 1; 1899 1900 out: 1901 if (gfpflags_allow_blocking(flags)) 1902 local_irq_disable(); 1903 if (!page) 1904 return NULL; 1905 1906 inc_slabs_node(s, page_to_nid(page), page->objects); 1907 1908 return page; 1909 } 1910 1911 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1912 { 1913 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 1914 flags = kmalloc_fix_flags(flags); 1915 1916 return allocate_slab(s, 1917 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1918 } 1919 1920 static void __free_slab(struct kmem_cache *s, struct page *page) 1921 { 1922 int order = compound_order(page); 1923 int pages = 1 << order; 1924 1925 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 1926 void *p; 1927 1928 slab_pad_check(s, page); 1929 for_each_object(p, s, page_address(page), 1930 page->objects) 1931 check_object(s, page, p, SLUB_RED_INACTIVE); 1932 } 1933 1934 __ClearPageSlabPfmemalloc(page); 1935 __ClearPageSlab(page); 1936 /* In union with page->mapping where page allocator expects NULL */ 1937 page->slab_cache = NULL; 1938 if (current->reclaim_state) 1939 current->reclaim_state->reclaimed_slab += pages; 1940 unaccount_slab_page(page, order, s); 1941 __free_pages(page, order); 1942 } 1943 1944 static void rcu_free_slab(struct rcu_head *h) 1945 { 1946 struct page *page = container_of(h, struct page, rcu_head); 1947 1948 __free_slab(page->slab_cache, page); 1949 } 1950 1951 static void free_slab(struct kmem_cache *s, struct page *page) 1952 { 1953 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { 1954 call_rcu(&page->rcu_head, rcu_free_slab); 1955 } else 1956 __free_slab(s, page); 1957 } 1958 1959 static void discard_slab(struct kmem_cache *s, struct page *page) 1960 { 1961 dec_slabs_node(s, page_to_nid(page), page->objects); 1962 free_slab(s, page); 1963 } 1964 1965 /* 1966 * Management of partially allocated slabs. 
1967 */ 1968 static inline void 1969 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) 1970 { 1971 n->nr_partial++; 1972 if (tail == DEACTIVATE_TO_TAIL) 1973 list_add_tail(&page->slab_list, &n->partial); 1974 else 1975 list_add(&page->slab_list, &n->partial); 1976 } 1977 1978 static inline void add_partial(struct kmem_cache_node *n, 1979 struct page *page, int tail) 1980 { 1981 lockdep_assert_held(&n->list_lock); 1982 __add_partial(n, page, tail); 1983 } 1984 1985 static inline void remove_partial(struct kmem_cache_node *n, 1986 struct page *page) 1987 { 1988 lockdep_assert_held(&n->list_lock); 1989 list_del(&page->slab_list); 1990 n->nr_partial--; 1991 } 1992 1993 /* 1994 * Remove slab from the partial list, freeze it and 1995 * return the pointer to the freelist. 1996 * 1997 * Returns a list of objects or NULL if it fails. 1998 */ 1999 static inline void *acquire_slab(struct kmem_cache *s, 2000 struct kmem_cache_node *n, struct page *page, 2001 int mode, int *objects) 2002 { 2003 void *freelist; 2004 unsigned long counters; 2005 struct page new; 2006 2007 lockdep_assert_held(&n->list_lock); 2008 2009 /* 2010 * Zap the freelist and set the frozen bit. 2011 * The old freelist is the list of objects for the 2012 * per cpu allocation list. 2013 */ 2014 freelist = page->freelist; 2015 counters = page->counters; 2016 new.counters = counters; 2017 *objects = new.objects - new.inuse; 2018 if (mode) { 2019 new.inuse = page->objects; 2020 new.freelist = NULL; 2021 } else { 2022 new.freelist = freelist; 2023 } 2024 2025 VM_BUG_ON(new.frozen); 2026 new.frozen = 1; 2027 2028 if (!__cmpxchg_double_slab(s, page, 2029 freelist, counters, 2030 new.freelist, new.counters, 2031 "acquire_slab")) 2032 return NULL; 2033 2034 remove_partial(n, page); 2035 WARN_ON(!freelist); 2036 return freelist; 2037 } 2038 2039 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 2040 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); 2041 2042 /* 2043 * Try to allocate a partial slab from a specific node. 2044 */ 2045 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 2046 struct kmem_cache_cpu *c, gfp_t flags) 2047 { 2048 struct page *page, *page2; 2049 void *object = NULL; 2050 unsigned int available = 0; 2051 int objects; 2052 2053 /* 2054 * Racy check. If we mistakenly see no partial slabs then we 2055 * just allocate an empty slab. If we mistakenly try to get a 2056 * partial slab and there is none available then get_partial() 2057 * will return NULL. 2058 */ 2059 if (!n || !n->nr_partial) 2060 return NULL; 2061 2062 spin_lock(&n->list_lock); 2063 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { 2064 void *t; 2065 2066 if (!pfmemalloc_match(page, flags)) 2067 continue; 2068 2069 t = acquire_slab(s, n, page, object == NULL, &objects); 2070 if (!t) 2071 break; 2072 2073 available += objects; 2074 if (!object) { 2075 c->page = page; 2076 stat(s, ALLOC_FROM_PARTIAL); 2077 object = t; 2078 } else { 2079 put_cpu_partial(s, page, 0); 2080 stat(s, CPU_PARTIAL_NODE); 2081 } 2082 if (!kmem_cache_has_cpu_partial(s) 2083 || available > slub_cpu_partial(s) / 2) 2084 break; 2085 2086 } 2087 spin_unlock(&n->list_lock); 2088 return object; 2089 } 2090 2091 /* 2092 * Get a page from somewhere. Search in increasing NUMA distances. 
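 *
 * A rough illustration of the remote_node_defrag_ratio gate used below:
 * the sysfs value is stored multiplied by ten, so writing 50 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores 500 and lets
 * roughly 500 out of every 1024 calls go on to scan remote nodes, while
 * the rest return NULL and fall back to allocating a fresh slab instead.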
2093 */ 2094 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2095 struct kmem_cache_cpu *c) 2096 { 2097 #ifdef CONFIG_NUMA 2098 struct zonelist *zonelist; 2099 struct zoneref *z; 2100 struct zone *zone; 2101 enum zone_type highest_zoneidx = gfp_zone(flags); 2102 void *object; 2103 unsigned int cpuset_mems_cookie; 2104 2105 /* 2106 * The defrag ratio allows a configuration of the tradeoffs between 2107 * inter node defragmentation and node local allocations. A lower 2108 * defrag_ratio increases the tendency to do local allocations 2109 * instead of attempting to obtain partial slabs from other nodes. 2110 * 2111 * If the defrag_ratio is set to 0 then kmalloc() always 2112 * returns node local objects. If the ratio is higher then kmalloc() 2113 * may return off node objects because partial slabs are obtained 2114 * from other nodes and filled up. 2115 * 2116 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2117 * (which makes defrag_ratio = 1000) then every (well almost) 2118 * allocation will first attempt to defrag slab caches on other nodes. 2119 * This means scanning over all nodes to look for partial slabs which 2120 * may be expensive if we do it every time we are trying to find a slab 2121 * with available objects. 2122 */ 2123 if (!s->remote_node_defrag_ratio || 2124 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2125 return NULL; 2126 2127 do { 2128 cpuset_mems_cookie = read_mems_allowed_begin(); 2129 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2130 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2131 struct kmem_cache_node *n; 2132 2133 n = get_node(s, zone_to_nid(zone)); 2134 2135 if (n && cpuset_zone_allowed(zone, flags) && 2136 n->nr_partial > s->min_partial) { 2137 object = get_partial_node(s, n, c, flags); 2138 if (object) { 2139 /* 2140 * Don't check read_mems_allowed_retry() 2141 * here - if mems_allowed was updated in 2142 * parallel, that was a harmless race 2143 * between allocation and the cpuset 2144 * update 2145 */ 2146 return object; 2147 } 2148 } 2149 } 2150 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2151 #endif /* CONFIG_NUMA */ 2152 return NULL; 2153 } 2154 2155 /* 2156 * Get a partial page, lock it and return it. 2157 */ 2158 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2159 struct kmem_cache_cpu *c) 2160 { 2161 void *object; 2162 int searchnode = node; 2163 2164 if (node == NUMA_NO_NODE) 2165 searchnode = numa_mem_id(); 2166 2167 object = get_partial_node(s, get_node(s, searchnode), c, flags); 2168 if (object || node != NUMA_NO_NODE) 2169 return object; 2170 2171 return get_any_partial(s, flags, c); 2172 } 2173 2174 #ifdef CONFIG_PREEMPTION 2175 /* 2176 * Calculate the next globally unique transaction for disambiguation 2177 * during cmpxchg. The transactions start with the cpu number and are then 2178 * incremented by CONFIG_NR_CPUS. 2179 */ 2180 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2181 #else 2182 /* 2183 * No preemption supported therefore also no need to check for 2184 * different cpus. 
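 *
 * An illustrative example of the encoding above: with CONFIG_NR_CPUS
 * rounded up to 64, TID_STEP is 64, so cpu 3 produces the tid sequence
 * 3, 67, 131, ...; tid_to_cpu() recovers the cpu as tid % 64 and
 * tid_to_event() counts operations as tid / 64. With a TID_STEP of 1
 * the tid is simply a per-cpu operation counter.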
2185 */ 2186 #define TID_STEP 1 2187 #endif 2188 2189 static inline unsigned long next_tid(unsigned long tid) 2190 { 2191 return tid + TID_STEP; 2192 } 2193 2194 #ifdef SLUB_DEBUG_CMPXCHG 2195 static inline unsigned int tid_to_cpu(unsigned long tid) 2196 { 2197 return tid % TID_STEP; 2198 } 2199 2200 static inline unsigned long tid_to_event(unsigned long tid) 2201 { 2202 return tid / TID_STEP; 2203 } 2204 #endif 2205 2206 static inline unsigned int init_tid(int cpu) 2207 { 2208 return cpu; 2209 } 2210 2211 static inline void note_cmpxchg_failure(const char *n, 2212 const struct kmem_cache *s, unsigned long tid) 2213 { 2214 #ifdef SLUB_DEBUG_CMPXCHG 2215 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2216 2217 pr_info("%s %s: cmpxchg redo ", n, s->name); 2218 2219 #ifdef CONFIG_PREEMPTION 2220 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2221 pr_warn("due to cpu change %d -> %d\n", 2222 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2223 else 2224 #endif 2225 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2226 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2227 tid_to_event(tid), tid_to_event(actual_tid)); 2228 else 2229 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2230 actual_tid, tid, next_tid(tid)); 2231 #endif 2232 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2233 } 2234 2235 static void init_kmem_cache_cpus(struct kmem_cache *s) 2236 { 2237 int cpu; 2238 2239 for_each_possible_cpu(cpu) 2240 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 2241 } 2242 2243 /* 2244 * Remove the cpu slab 2245 */ 2246 static void deactivate_slab(struct kmem_cache *s, struct page *page, 2247 void *freelist, struct kmem_cache_cpu *c) 2248 { 2249 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 2250 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 2251 int lock = 0, free_delta = 0; 2252 enum slab_modes l = M_NONE, m = M_NONE; 2253 void *nextfree, *freelist_iter, *freelist_tail; 2254 int tail = DEACTIVATE_TO_HEAD; 2255 struct page new; 2256 struct page old; 2257 2258 if (page->freelist) { 2259 stat(s, DEACTIVATE_REMOTE_FREES); 2260 tail = DEACTIVATE_TO_TAIL; 2261 } 2262 2263 /* 2264 * Stage one: Count the objects on cpu's freelist as free_delta and 2265 * remember the last object in freelist_tail for later splicing. 2266 */ 2267 freelist_tail = NULL; 2268 freelist_iter = freelist; 2269 while (freelist_iter) { 2270 nextfree = get_freepointer(s, freelist_iter); 2271 2272 /* 2273 * If 'nextfree' is invalid, it is possible that the object at 2274 * 'freelist_iter' is already corrupted. So isolate all objects 2275 * starting at 'freelist_iter' by skipping them. 2276 */ 2277 if (freelist_corrupted(s, page, &freelist_iter, nextfree)) 2278 break; 2279 2280 freelist_tail = freelist_iter; 2281 free_delta++; 2282 2283 freelist_iter = nextfree; 2284 } 2285 2286 /* 2287 * Stage two: Unfreeze the page while splicing the per-cpu 2288 * freelist to the head of page's freelist. 2289 * 2290 * Ensure that the page is unfrozen while the list presence 2291 * reflects the actual number of objects during unfreeze. 2292 * 2293 * We setup the list membership and then perform a cmpxchg 2294 * with the count. If there is a mismatch then the page 2295 * is not unfrozen but the page is on the wrong list. 2296 * 2297 * Then we restart the process which may have to remove 2298 * the page from the list that we just put it on again 2299 * because the number of objects in the slab may have 2300 * changed. 
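 *
 * Put differently, the loop below is a small state machine: the target
 * state m is M_FREE (no objects in use and the node already holds at
 * least min_partial slabs), M_PARTIAL (a freelist remains) or M_FULL
 * (no free objects), and list membership is only touched when the
 * previously computed target l differs from m.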
2301 */ 2302 redo: 2303 2304 old.freelist = READ_ONCE(page->freelist); 2305 old.counters = READ_ONCE(page->counters); 2306 VM_BUG_ON(!old.frozen); 2307 2308 /* Determine target state of the slab */ 2309 new.counters = old.counters; 2310 if (freelist_tail) { 2311 new.inuse -= free_delta; 2312 set_freepointer(s, freelist_tail, old.freelist); 2313 new.freelist = freelist; 2314 } else 2315 new.freelist = old.freelist; 2316 2317 new.frozen = 0; 2318 2319 if (!new.inuse && n->nr_partial >= s->min_partial) 2320 m = M_FREE; 2321 else if (new.freelist) { 2322 m = M_PARTIAL; 2323 if (!lock) { 2324 lock = 1; 2325 /* 2326 * Taking the spinlock removes the possibility 2327 * that acquire_slab() will see a slab page that 2328 * is frozen 2329 */ 2330 spin_lock(&n->list_lock); 2331 } 2332 } else { 2333 m = M_FULL; 2334 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) { 2335 lock = 1; 2336 /* 2337 * This also ensures that the scanning of full 2338 * slabs from diagnostic functions will not see 2339 * any frozen slabs. 2340 */ 2341 spin_lock(&n->list_lock); 2342 } 2343 } 2344 2345 if (l != m) { 2346 if (l == M_PARTIAL) 2347 remove_partial(n, page); 2348 else if (l == M_FULL) 2349 remove_full(s, n, page); 2350 2351 if (m == M_PARTIAL) 2352 add_partial(n, page, tail); 2353 else if (m == M_FULL) 2354 add_full(s, n, page); 2355 } 2356 2357 l = m; 2358 if (!__cmpxchg_double_slab(s, page, 2359 old.freelist, old.counters, 2360 new.freelist, new.counters, 2361 "unfreezing slab")) 2362 goto redo; 2363 2364 if (lock) 2365 spin_unlock(&n->list_lock); 2366 2367 if (m == M_PARTIAL) 2368 stat(s, tail); 2369 else if (m == M_FULL) 2370 stat(s, DEACTIVATE_FULL); 2371 else if (m == M_FREE) { 2372 stat(s, DEACTIVATE_EMPTY); 2373 discard_slab(s, page); 2374 stat(s, FREE_SLAB); 2375 } 2376 2377 c->page = NULL; 2378 c->freelist = NULL; 2379 } 2380 2381 /* 2382 * Unfreeze all the cpu partial slabs. 2383 * 2384 * This function must be called with interrupts disabled 2385 * for the cpu using c (or some other guarantee must be there 2386 * to guarantee no concurrent accesses). 
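 *
 * Callers in this file either run from the flush IPI with interrupts
 * already off, or bracket the call themselves, roughly:
 *
 *   local_irq_save(flags);
 *   unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
 *   local_irq_restore(flags);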
2387 */ 2388 static void unfreeze_partials(struct kmem_cache *s, 2389 struct kmem_cache_cpu *c) 2390 { 2391 #ifdef CONFIG_SLUB_CPU_PARTIAL 2392 struct kmem_cache_node *n = NULL, *n2 = NULL; 2393 struct page *page, *discard_page = NULL; 2394 2395 while ((page = slub_percpu_partial(c))) { 2396 struct page new; 2397 struct page old; 2398 2399 slub_set_percpu_partial(c, page); 2400 2401 n2 = get_node(s, page_to_nid(page)); 2402 if (n != n2) { 2403 if (n) 2404 spin_unlock(&n->list_lock); 2405 2406 n = n2; 2407 spin_lock(&n->list_lock); 2408 } 2409 2410 do { 2411 2412 old.freelist = page->freelist; 2413 old.counters = page->counters; 2414 VM_BUG_ON(!old.frozen); 2415 2416 new.counters = old.counters; 2417 new.freelist = old.freelist; 2418 2419 new.frozen = 0; 2420 2421 } while (!__cmpxchg_double_slab(s, page, 2422 old.freelist, old.counters, 2423 new.freelist, new.counters, 2424 "unfreezing slab")); 2425 2426 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2427 page->next = discard_page; 2428 discard_page = page; 2429 } else { 2430 add_partial(n, page, DEACTIVATE_TO_TAIL); 2431 stat(s, FREE_ADD_PARTIAL); 2432 } 2433 } 2434 2435 if (n) 2436 spin_unlock(&n->list_lock); 2437 2438 while (discard_page) { 2439 page = discard_page; 2440 discard_page = discard_page->next; 2441 2442 stat(s, DEACTIVATE_EMPTY); 2443 discard_slab(s, page); 2444 stat(s, FREE_SLAB); 2445 } 2446 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2447 } 2448 2449 /* 2450 * Put a page that was just frozen (in __slab_free|get_partial_node) into a 2451 * partial page slot if available. 2452 * 2453 * If we did not find a slot then simply move all the partials to the 2454 * per node partial list. 2455 */ 2456 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2457 { 2458 #ifdef CONFIG_SLUB_CPU_PARTIAL 2459 struct page *oldpage; 2460 int pages; 2461 int pobjects; 2462 2463 preempt_disable(); 2464 do { 2465 pages = 0; 2466 pobjects = 0; 2467 oldpage = this_cpu_read(s->cpu_slab->partial); 2468 2469 if (oldpage) { 2470 pobjects = oldpage->pobjects; 2471 pages = oldpage->pages; 2472 if (drain && pobjects > slub_cpu_partial(s)) { 2473 unsigned long flags; 2474 /* 2475 * partial array is full. Move the existing 2476 * set to the per node partial list. 2477 */ 2478 local_irq_save(flags); 2479 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2480 local_irq_restore(flags); 2481 oldpage = NULL; 2482 pobjects = 0; 2483 pages = 0; 2484 stat(s, CPU_PARTIAL_DRAIN); 2485 } 2486 } 2487 2488 pages++; 2489 pobjects += page->objects - page->inuse; 2490 2491 page->pages = pages; 2492 page->pobjects = pobjects; 2493 page->next = oldpage; 2494 2495 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) 2496 != oldpage); 2497 if (unlikely(!slub_cpu_partial(s))) { 2498 unsigned long flags; 2499 2500 local_irq_save(flags); 2501 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2502 local_irq_restore(flags); 2503 } 2504 preempt_enable(); 2505 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2506 } 2507 2508 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2509 { 2510 stat(s, CPUSLAB_FLUSH); 2511 deactivate_slab(s, c->page, c->freelist, c); 2512 2513 c->tid = next_tid(c->tid); 2514 } 2515 2516 /* 2517 * Flush cpu slab. 2518 * 2519 * Called from IPI handler with interrupts disabled. 
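 *
 * flush_all() below only sends this IPI to cpus for which has_cpu_slab()
 * reports a cpu slab or per-cpu partial pages, so cpus holding nothing
 * for the cache are not interrupted.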
2520 */ 2521 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2522 { 2523 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2524 2525 if (c->page) 2526 flush_slab(s, c); 2527 2528 unfreeze_partials(s, c); 2529 } 2530 2531 static void flush_cpu_slab(void *d) 2532 { 2533 struct kmem_cache *s = d; 2534 2535 __flush_cpu_slab(s, smp_processor_id()); 2536 } 2537 2538 static bool has_cpu_slab(int cpu, void *info) 2539 { 2540 struct kmem_cache *s = info; 2541 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2542 2543 return c->page || slub_percpu_partial(c); 2544 } 2545 2546 static void flush_all(struct kmem_cache *s) 2547 { 2548 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1); 2549 } 2550 2551 /* 2552 * Use the cpu notifier to insure that the cpu slabs are flushed when 2553 * necessary. 2554 */ 2555 static int slub_cpu_dead(unsigned int cpu) 2556 { 2557 struct kmem_cache *s; 2558 unsigned long flags; 2559 2560 mutex_lock(&slab_mutex); 2561 list_for_each_entry(s, &slab_caches, list) { 2562 local_irq_save(flags); 2563 __flush_cpu_slab(s, cpu); 2564 local_irq_restore(flags); 2565 } 2566 mutex_unlock(&slab_mutex); 2567 return 0; 2568 } 2569 2570 /* 2571 * Check if the objects in a per cpu structure fit numa 2572 * locality expectations. 2573 */ 2574 static inline int node_match(struct page *page, int node) 2575 { 2576 #ifdef CONFIG_NUMA 2577 if (node != NUMA_NO_NODE && page_to_nid(page) != node) 2578 return 0; 2579 #endif 2580 return 1; 2581 } 2582 2583 #ifdef CONFIG_SLUB_DEBUG 2584 static int count_free(struct page *page) 2585 { 2586 return page->objects - page->inuse; 2587 } 2588 2589 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2590 { 2591 return atomic_long_read(&n->total_objects); 2592 } 2593 #endif /* CONFIG_SLUB_DEBUG */ 2594 2595 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2596 static unsigned long count_partial(struct kmem_cache_node *n, 2597 int (*get_count)(struct page *)) 2598 { 2599 unsigned long flags; 2600 unsigned long x = 0; 2601 struct page *page; 2602 2603 spin_lock_irqsave(&n->list_lock, flags); 2604 list_for_each_entry(page, &n->partial, slab_list) 2605 x += get_count(page); 2606 spin_unlock_irqrestore(&n->list_lock, flags); 2607 return x; 2608 } 2609 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2610 2611 static noinline void 2612 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2613 { 2614 #ifdef CONFIG_SLUB_DEBUG 2615 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2616 DEFAULT_RATELIMIT_BURST); 2617 int node; 2618 struct kmem_cache_node *n; 2619 2620 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2621 return; 2622 2623 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2624 nid, gfpflags, &gfpflags); 2625 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2626 s->name, s->object_size, s->size, oo_order(s->oo), 2627 oo_order(s->min)); 2628 2629 if (oo_order(s->min) > get_order(s->object_size)) 2630 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2631 s->name); 2632 2633 for_each_kmem_cache_node(s, node, n) { 2634 unsigned long nr_slabs; 2635 unsigned long nr_objs; 2636 unsigned long nr_free; 2637 2638 nr_free = count_partial(n, count_free); 2639 nr_slabs = node_nr_slabs(n); 2640 nr_objs = node_nr_objs(n); 2641 2642 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2643 node, nr_slabs, nr_objs, nr_free); 2644 } 2645 #endif 2646 } 2647 2648 static inline 
void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2649 int node, struct kmem_cache_cpu **pc) 2650 { 2651 void *freelist; 2652 struct kmem_cache_cpu *c = *pc; 2653 struct page *page; 2654 2655 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2656 2657 freelist = get_partial(s, flags, node, c); 2658 2659 if (freelist) 2660 return freelist; 2661 2662 page = new_slab(s, flags, node); 2663 if (page) { 2664 c = raw_cpu_ptr(s->cpu_slab); 2665 if (c->page) 2666 flush_slab(s, c); 2667 2668 /* 2669 * No other reference to the page yet so we can 2670 * muck around with it freely without cmpxchg 2671 */ 2672 freelist = page->freelist; 2673 page->freelist = NULL; 2674 2675 stat(s, ALLOC_SLAB); 2676 c->page = page; 2677 *pc = c; 2678 } 2679 2680 return freelist; 2681 } 2682 2683 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2684 { 2685 if (unlikely(PageSlabPfmemalloc(page))) 2686 return gfp_pfmemalloc_allowed(gfpflags); 2687 2688 return true; 2689 } 2690 2691 /* 2692 * Check the page->freelist of a page and either transfer the freelist to the 2693 * per cpu freelist or deactivate the page. 2694 * 2695 * The page is still frozen if the return value is not NULL. 2696 * 2697 * If this function returns NULL then the page has been unfrozen. 2698 * 2699 * This function must be called with interrupt disabled. 2700 */ 2701 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2702 { 2703 struct page new; 2704 unsigned long counters; 2705 void *freelist; 2706 2707 do { 2708 freelist = page->freelist; 2709 counters = page->counters; 2710 2711 new.counters = counters; 2712 VM_BUG_ON(!new.frozen); 2713 2714 new.inuse = page->objects; 2715 new.frozen = freelist != NULL; 2716 2717 } while (!__cmpxchg_double_slab(s, page, 2718 freelist, counters, 2719 NULL, new.counters, 2720 "get_freelist")); 2721 2722 return freelist; 2723 } 2724 2725 /* 2726 * Slow path. The lockless freelist is empty or we need to perform 2727 * debugging duties. 2728 * 2729 * Processing is still very fast if new objects have been freed to the 2730 * regular freelist. In that case we simply take over the regular freelist 2731 * as the lockless freelist and zap the regular freelist. 2732 * 2733 * If that is not working then we fall back to the partial lists. We take the 2734 * first element of the freelist as the object to allocate now and move the 2735 * rest of the freelist to the lockless freelist. 2736 * 2737 * And if we were unable to get a new slab from the partial slab lists then 2738 * we need to allocate a new slab. This is the slowest path since it involves 2739 * a call to the page allocator and the setup of a new slab. 2740 * 2741 * Version of __slab_alloc to use when we know that interrupts are 2742 * already disabled (which is the case for bulk allocation). 
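 *
 * Rough order of attempts made below, cheapest first:
 *   1. c->freelist, re-checked now that we cannot migrate
 *   2. the page's own freelist, taken over via get_freelist()
 *   3. a slab from the per-cpu partial list
 *   4. a slab from the node partial lists, via get_partial()
 *   5. a brand new slab from the page allocator, via new_slab()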
2743 */ 2744 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2745 unsigned long addr, struct kmem_cache_cpu *c) 2746 { 2747 void *freelist; 2748 struct page *page; 2749 2750 stat(s, ALLOC_SLOWPATH); 2751 2752 page = c->page; 2753 if (!page) { 2754 /* 2755 * if the node is not online or has no normal memory, just 2756 * ignore the node constraint 2757 */ 2758 if (unlikely(node != NUMA_NO_NODE && 2759 !node_isset(node, slab_nodes))) 2760 node = NUMA_NO_NODE; 2761 goto new_slab; 2762 } 2763 redo: 2764 2765 if (unlikely(!node_match(page, node))) { 2766 /* 2767 * same as above but node_match() being false already 2768 * implies node != NUMA_NO_NODE 2769 */ 2770 if (!node_isset(node, slab_nodes)) { 2771 node = NUMA_NO_NODE; 2772 goto redo; 2773 } else { 2774 stat(s, ALLOC_NODE_MISMATCH); 2775 deactivate_slab(s, page, c->freelist, c); 2776 goto new_slab; 2777 } 2778 } 2779 2780 /* 2781 * By rights, we should be searching for a slab page that was 2782 * PFMEMALLOC but right now, we are losing the pfmemalloc 2783 * information when the page leaves the per-cpu allocator 2784 */ 2785 if (unlikely(!pfmemalloc_match(page, gfpflags))) { 2786 deactivate_slab(s, page, c->freelist, c); 2787 goto new_slab; 2788 } 2789 2790 /* must check again c->freelist in case of cpu migration or IRQ */ 2791 freelist = c->freelist; 2792 if (freelist) 2793 goto load_freelist; 2794 2795 freelist = get_freelist(s, page); 2796 2797 if (!freelist) { 2798 c->page = NULL; 2799 stat(s, DEACTIVATE_BYPASS); 2800 goto new_slab; 2801 } 2802 2803 stat(s, ALLOC_REFILL); 2804 2805 load_freelist: 2806 /* 2807 * freelist is pointing to the list of objects to be used. 2808 * page is pointing to the page from which the objects are obtained. 2809 * That page must be frozen for per cpu allocations to work. 2810 */ 2811 VM_BUG_ON(!c->page->frozen); 2812 c->freelist = get_freepointer(s, freelist); 2813 c->tid = next_tid(c->tid); 2814 return freelist; 2815 2816 new_slab: 2817 2818 if (slub_percpu_partial(c)) { 2819 page = c->page = slub_percpu_partial(c); 2820 slub_set_percpu_partial(c, page); 2821 stat(s, CPU_PARTIAL_ALLOC); 2822 goto redo; 2823 } 2824 2825 freelist = new_slab_objects(s, gfpflags, node, &c); 2826 2827 if (unlikely(!freelist)) { 2828 slab_out_of_memory(s, gfpflags, node); 2829 return NULL; 2830 } 2831 2832 page = c->page; 2833 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) 2834 goto load_freelist; 2835 2836 /* Only entered in the debug case */ 2837 if (kmem_cache_debug(s) && 2838 !alloc_debug_processing(s, page, freelist, addr)) 2839 goto new_slab; /* Slab failed checks. Next slab needed */ 2840 2841 deactivate_slab(s, page, get_freepointer(s, freelist), c); 2842 return freelist; 2843 } 2844 2845 /* 2846 * Another one that disabled interrupt and compensates for possible 2847 * cpu changes by refetching the per cpu area pointer. 2848 */ 2849 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2850 unsigned long addr, struct kmem_cache_cpu *c) 2851 { 2852 void *p; 2853 unsigned long flags; 2854 2855 local_irq_save(flags); 2856 #ifdef CONFIG_PREEMPTION 2857 /* 2858 * We may have been preempted and rescheduled on a different 2859 * cpu before disabling interrupts. Need to reload cpu area 2860 * pointer. 
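 *
 * (The tid check in slab_alloc_node() only protects the lockless
 * fastpath; once we are committed to the slow path the kmem_cache_cpu
 * pointer passed in by the caller may be stale, hence the reload.)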
2861 */ 2862 c = this_cpu_ptr(s->cpu_slab); 2863 #endif 2864 2865 p = ___slab_alloc(s, gfpflags, node, addr, c); 2866 local_irq_restore(flags); 2867 return p; 2868 } 2869 2870 /* 2871 * If the object has been wiped upon free, make sure it's fully initialized by 2872 * zeroing out freelist pointer. 2873 */ 2874 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 2875 void *obj) 2876 { 2877 if (unlikely(slab_want_init_on_free(s)) && obj) 2878 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 2879 0, sizeof(void *)); 2880 } 2881 2882 /* 2883 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2884 * have the fastpath folded into their functions. So no function call 2885 * overhead for requests that can be satisfied on the fastpath. 2886 * 2887 * The fastpath works by first checking if the lockless freelist can be used. 2888 * If not then __slab_alloc is called for slow processing. 2889 * 2890 * Otherwise we can simply pick the next object from the lockless free list. 2891 */ 2892 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2893 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 2894 { 2895 void *object; 2896 struct kmem_cache_cpu *c; 2897 struct page *page; 2898 unsigned long tid; 2899 struct obj_cgroup *objcg = NULL; 2900 bool init = false; 2901 2902 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); 2903 if (!s) 2904 return NULL; 2905 2906 object = kfence_alloc(s, orig_size, gfpflags); 2907 if (unlikely(object)) 2908 goto out; 2909 2910 redo: 2911 /* 2912 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 2913 * enabled. We may switch back and forth between cpus while 2914 * reading from one cpu area. That does not matter as long 2915 * as we end up on the original cpu again when doing the cmpxchg. 2916 * 2917 * We should guarantee that tid and kmem_cache are retrieved on 2918 * the same cpu. It could be different if CONFIG_PREEMPTION so we need 2919 * to check if it is matched or not. 2920 */ 2921 do { 2922 tid = this_cpu_read(s->cpu_slab->tid); 2923 c = raw_cpu_ptr(s->cpu_slab); 2924 } while (IS_ENABLED(CONFIG_PREEMPTION) && 2925 unlikely(tid != READ_ONCE(c->tid))); 2926 2927 /* 2928 * Irqless object alloc/free algorithm used here depends on sequence 2929 * of fetching cpu_slab's data. tid should be fetched before anything 2930 * on c to guarantee that object and page associated with previous tid 2931 * won't be used with current tid. If we fetch tid first, object and 2932 * page could be one associated with next tid and our alloc/free 2933 * request will be failed. In this case, we will retry. So, no problem. 2934 */ 2935 barrier(); 2936 2937 /* 2938 * The transaction ids are globally unique per cpu and per operation on 2939 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 2940 * occurs on the right processor and that there was no operation on the 2941 * linked list in between. 2942 */ 2943 2944 object = c->freelist; 2945 page = c->page; 2946 if (unlikely(!object || !page || !node_match(page, node))) { 2947 object = __slab_alloc(s, gfpflags, node, addr, c); 2948 } else { 2949 void *next_object = get_freepointer_safe(s, object); 2950 2951 /* 2952 * The cmpxchg will only match if there was no additional 2953 * operation and if we are on the right processor. 2954 * 2955 * The cmpxchg does the following atomically (without lock 2956 * semantics!) 2957 * 1. Relocate first pointer to the current per cpu area. 2958 * 2. 
Verify that tid and freelist have not been changed 2959 * 3. If they were not changed replace tid and freelist 2960 * 2961 * Since this is without lock semantics the protection is only 2962 * against code executing on this cpu *not* from access by 2963 * other cpus. 2964 */ 2965 if (unlikely(!this_cpu_cmpxchg_double( 2966 s->cpu_slab->freelist, s->cpu_slab->tid, 2967 object, tid, 2968 next_object, next_tid(tid)))) { 2969 2970 note_cmpxchg_failure("slab_alloc", s, tid); 2971 goto redo; 2972 } 2973 prefetch_freepointer(s, next_object); 2974 stat(s, ALLOC_FASTPATH); 2975 } 2976 2977 maybe_wipe_obj_freeptr(s, object); 2978 init = slab_want_init_on_alloc(gfpflags, s); 2979 2980 out: 2981 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); 2982 2983 return object; 2984 } 2985 2986 static __always_inline void *slab_alloc(struct kmem_cache *s, 2987 gfp_t gfpflags, unsigned long addr, size_t orig_size) 2988 { 2989 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); 2990 } 2991 2992 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2993 { 2994 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); 2995 2996 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 2997 s->size, gfpflags); 2998 2999 return ret; 3000 } 3001 EXPORT_SYMBOL(kmem_cache_alloc); 3002 3003 #ifdef CONFIG_TRACING 3004 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 3005 { 3006 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); 3007 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 3008 ret = kasan_kmalloc(s, ret, size, gfpflags); 3009 return ret; 3010 } 3011 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3012 #endif 3013 3014 #ifdef CONFIG_NUMA 3015 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3016 { 3017 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); 3018 3019 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3020 s->object_size, s->size, gfpflags, node); 3021 3022 return ret; 3023 } 3024 EXPORT_SYMBOL(kmem_cache_alloc_node); 3025 3026 #ifdef CONFIG_TRACING 3027 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 3028 gfp_t gfpflags, 3029 int node, size_t size) 3030 { 3031 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); 3032 3033 trace_kmalloc_node(_RET_IP_, ret, 3034 size, s->size, gfpflags, node); 3035 3036 ret = kasan_kmalloc(s, ret, size, gfpflags); 3037 return ret; 3038 } 3039 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3040 #endif 3041 #endif /* CONFIG_NUMA */ 3042 3043 /* 3044 * Slow path handling. This may still be called frequently since objects 3045 * have a longer lifetime than the cpu slabs in most processing loads. 3046 * 3047 * So we still attempt to reduce cache line usage. Just take the slab 3048 * lock and free the item. If there is no additional partial page 3049 * handling required then we can return immediately. 
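 *
 * Roughly, the common cases below: freeing into a frozen slab only
 * splices the objects in with a cmpxchg and takes no lock; freeing the
 * first object back into a full, unfrozen slab freezes it and parks it
 * on the per cpu partial list; n->list_lock is only taken when a slab
 * has to change node lists or may become completely empty.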
3050 */ 3051 static void __slab_free(struct kmem_cache *s, struct page *page, 3052 void *head, void *tail, int cnt, 3053 unsigned long addr) 3054 3055 { 3056 void *prior; 3057 int was_frozen; 3058 struct page new; 3059 unsigned long counters; 3060 struct kmem_cache_node *n = NULL; 3061 unsigned long flags; 3062 3063 stat(s, FREE_SLOWPATH); 3064 3065 if (kfence_free(head)) 3066 return; 3067 3068 if (kmem_cache_debug(s) && 3069 !free_debug_processing(s, page, head, tail, cnt, addr)) 3070 return; 3071 3072 do { 3073 if (unlikely(n)) { 3074 spin_unlock_irqrestore(&n->list_lock, flags); 3075 n = NULL; 3076 } 3077 prior = page->freelist; 3078 counters = page->counters; 3079 set_freepointer(s, tail, prior); 3080 new.counters = counters; 3081 was_frozen = new.frozen; 3082 new.inuse -= cnt; 3083 if ((!new.inuse || !prior) && !was_frozen) { 3084 3085 if (kmem_cache_has_cpu_partial(s) && !prior) { 3086 3087 /* 3088 * Slab was on no list before and will be 3089 * partially empty 3090 * We can defer the list move and instead 3091 * freeze it. 3092 */ 3093 new.frozen = 1; 3094 3095 } else { /* Needs to be taken off a list */ 3096 3097 n = get_node(s, page_to_nid(page)); 3098 /* 3099 * Speculatively acquire the list_lock. 3100 * If the cmpxchg does not succeed then we may 3101 * drop the list_lock without any processing. 3102 * 3103 * Otherwise the list_lock will synchronize with 3104 * other processors updating the list of slabs. 3105 */ 3106 spin_lock_irqsave(&n->list_lock, flags); 3107 3108 } 3109 } 3110 3111 } while (!cmpxchg_double_slab(s, page, 3112 prior, counters, 3113 head, new.counters, 3114 "__slab_free")); 3115 3116 if (likely(!n)) { 3117 3118 if (likely(was_frozen)) { 3119 /* 3120 * The list lock was not taken therefore no list 3121 * activity can be necessary. 3122 */ 3123 stat(s, FREE_FROZEN); 3124 } else if (new.frozen) { 3125 /* 3126 * If we just froze the page then put it onto the 3127 * per cpu partial list. 3128 */ 3129 put_cpu_partial(s, page, 1); 3130 stat(s, CPU_PARTIAL_FREE); 3131 } 3132 3133 return; 3134 } 3135 3136 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3137 goto slab_empty; 3138 3139 /* 3140 * Objects left in the slab. If it was not on the partial list before 3141 * then add it. 3142 */ 3143 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3144 remove_full(s, n, page); 3145 add_partial(n, page, DEACTIVATE_TO_TAIL); 3146 stat(s, FREE_ADD_PARTIAL); 3147 } 3148 spin_unlock_irqrestore(&n->list_lock, flags); 3149 return; 3150 3151 slab_empty: 3152 if (prior) { 3153 /* 3154 * Slab on the partial list. 3155 */ 3156 remove_partial(n, page); 3157 stat(s, FREE_REMOVE_PARTIAL); 3158 } else { 3159 /* Slab must be on the full list */ 3160 remove_full(s, n, page); 3161 } 3162 3163 spin_unlock_irqrestore(&n->list_lock, flags); 3164 stat(s, FREE_SLAB); 3165 discard_slab(s, page); 3166 } 3167 3168 /* 3169 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3170 * can perform fastpath freeing without additional function calls. 3171 * 3172 * The fastpath is only possible if we are freeing to the current cpu slab 3173 * of this processor. This typically the case if we have just allocated 3174 * the item before. 3175 * 3176 * If fastpath is not possible then fall back to __slab_free where we deal 3177 * with all sorts of special processing. 3178 * 3179 * Bulk free of a freelist with several objects (all pointing to the 3180 * same page) possible by specifying head and tail ptr, plus objects 3181 * count (cnt). 
A bulk free is indicated by the tail pointer being set. 3182 */
3183 static __always_inline void do_slab_free(struct kmem_cache *s,
3184 struct page *page, void *head, void *tail,
3185 int cnt, unsigned long addr)
3186 {
3187 void *tail_obj = tail ? : head;
3188 struct kmem_cache_cpu *c;
3189 unsigned long tid;
3190
3191 memcg_slab_free_hook(s, &head, 1);
3192 redo:
3193 /*
3194 * Determine the current cpu's per cpu slab.
3195 * The cpu may change afterward. However that does not matter since
3196 * data is retrieved via this pointer. If we are on the same cpu
3197 * during the cmpxchg then the free will succeed.
3198 */
3199 do {
3200 tid = this_cpu_read(s->cpu_slab->tid);
3201 c = raw_cpu_ptr(s->cpu_slab);
3202 } while (IS_ENABLED(CONFIG_PREEMPTION) &&
3203 unlikely(tid != READ_ONCE(c->tid)));
3204
3205 /* Same as the comment on barrier() in slab_alloc_node() */
3206 barrier();
3207
3208 if (likely(page == c->page)) {
3209 void **freelist = READ_ONCE(c->freelist);
3210
3211 set_freepointer(s, tail_obj, freelist);
3212
3213 if (unlikely(!this_cpu_cmpxchg_double(
3214 s->cpu_slab->freelist, s->cpu_slab->tid,
3215 freelist, tid,
3216 head, next_tid(tid)))) {
3217
3218 note_cmpxchg_failure("slab_free", s, tid);
3219 goto redo;
3220 }
3221 stat(s, FREE_FASTPATH);
3222 } else
3223 __slab_free(s, page, head, tail_obj, cnt, addr);
3224
3225 }
3226
3227 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3228 void *head, void *tail, int cnt,
3229 unsigned long addr)
3230 {
3231 /*
3232 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3233 * to remove objects, whose reuse must be delayed.
3234 */
3235 if (slab_free_freelist_hook(s, &head, &tail))
3236 do_slab_free(s, page, head, tail, cnt, addr);
3237 }
3238
3239 #ifdef CONFIG_KASAN_GENERIC
3240 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3241 {
3242 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3243 }
3244 #endif
3245
3246 void kmem_cache_free(struct kmem_cache *s, void *x)
3247 {
3248 s = cache_from_obj(s, x);
3249 if (!s)
3250 return;
3251 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3252 trace_kmem_cache_free(_RET_IP_, x, s->name);
3253 }
3254 EXPORT_SYMBOL(kmem_cache_free);
3255
3256 struct detached_freelist {
3257 struct page *page;
3258 void *tail;
3259 void *freelist;
3260 int cnt;
3261 struct kmem_cache *s;
3262 };
3263
3264 /*
3265 * This function progressively scans the array with free objects (with
3266 * a limited look ahead) and extracts objects belonging to the same
3267 * page. It builds a detached freelist directly within the given
3268 * page/objects. This can happen without any need for
3269 * synchronization, because the objects are owned by the running process.
3270 * The freelist is built up as a single linked list in the objects.
3271 * The idea is that this detached freelist can then be bulk
3272 * transferred to the real freelist(s), but only requiring a single
3273 * synchronization primitive. Look ahead in the array is limited for
3274 * performance reasons.
3275 */
3276 static inline
3277 int build_detached_freelist(struct kmem_cache *s, size_t size,
3278 void **p, struct detached_freelist *df)
3279 {
3280 size_t first_skipped_index = 0;
3281 int lookahead = 3;
3282 void *object;
3283 struct page *page;
3284
3285 /* Always re-init detached_freelist */
3286 df->page = NULL;
3287
3288 do {
3289 object = p[--size];
3290 /* Do we need !ZERO_OR_NULL_PTR(object) here?
(for kfree) */ 3291 } while (!object && size); 3292 3293 if (!object) 3294 return 0; 3295 3296 page = virt_to_head_page(object); 3297 if (!s) { 3298 /* Handle kalloc'ed objects */ 3299 if (unlikely(!PageSlab(page))) { 3300 BUG_ON(!PageCompound(page)); 3301 kfree_hook(object); 3302 __free_pages(page, compound_order(page)); 3303 p[size] = NULL; /* mark object processed */ 3304 return size; 3305 } 3306 /* Derive kmem_cache from object */ 3307 df->s = page->slab_cache; 3308 } else { 3309 df->s = cache_from_obj(s, object); /* Support for memcg */ 3310 } 3311 3312 if (is_kfence_address(object)) { 3313 slab_free_hook(df->s, object, false); 3314 __kfence_free(object); 3315 p[size] = NULL; /* mark object processed */ 3316 return size; 3317 } 3318 3319 /* Start new detached freelist */ 3320 df->page = page; 3321 set_freepointer(df->s, object, NULL); 3322 df->tail = object; 3323 df->freelist = object; 3324 p[size] = NULL; /* mark object processed */ 3325 df->cnt = 1; 3326 3327 while (size) { 3328 object = p[--size]; 3329 if (!object) 3330 continue; /* Skip processed objects */ 3331 3332 /* df->page is always set at this point */ 3333 if (df->page == virt_to_head_page(object)) { 3334 /* Opportunity build freelist */ 3335 set_freepointer(df->s, object, df->freelist); 3336 df->freelist = object; 3337 df->cnt++; 3338 p[size] = NULL; /* mark object processed */ 3339 3340 continue; 3341 } 3342 3343 /* Limit look ahead search */ 3344 if (!--lookahead) 3345 break; 3346 3347 if (!first_skipped_index) 3348 first_skipped_index = size + 1; 3349 } 3350 3351 return first_skipped_index; 3352 } 3353 3354 /* Note that interrupts must be enabled when calling this function. */ 3355 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3356 { 3357 if (WARN_ON(!size)) 3358 return; 3359 3360 memcg_slab_free_hook(s, p, size); 3361 do { 3362 struct detached_freelist df; 3363 3364 size = build_detached_freelist(s, size, p, &df); 3365 if (!df.page) 3366 continue; 3367 3368 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 3369 } while (likely(size)); 3370 } 3371 EXPORT_SYMBOL(kmem_cache_free_bulk); 3372 3373 /* Note that interrupts must be enabled when calling this function. */ 3374 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3375 void **p) 3376 { 3377 struct kmem_cache_cpu *c; 3378 int i; 3379 struct obj_cgroup *objcg = NULL; 3380 3381 /* memcg and kmem_cache debug support */ 3382 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3383 if (unlikely(!s)) 3384 return false; 3385 /* 3386 * Drain objects in the per cpu slab, while disabling local 3387 * IRQs, which protects against PREEMPT and interrupts 3388 * handlers invoking normal fastpath. 3389 */ 3390 local_irq_disable(); 3391 c = this_cpu_ptr(s->cpu_slab); 3392 3393 for (i = 0; i < size; i++) { 3394 void *object = kfence_alloc(s, s->object_size, flags); 3395 3396 if (unlikely(object)) { 3397 p[i] = object; 3398 continue; 3399 } 3400 3401 object = c->freelist; 3402 if (unlikely(!object)) { 3403 /* 3404 * We may have removed an object from c->freelist using 3405 * the fastpath in the previous iteration; in that case, 3406 * c->tid has not been bumped yet. 3407 * Since ___slab_alloc() may reenable interrupts while 3408 * allocating memory, we should bump c->tid now. 
3409 */
3410 c->tid = next_tid(c->tid);
3411
3412 /*
3413 * Invoking the slow path likely has the side effect
3414 * of re-populating the per CPU c->freelist
3415 */
3416 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3417 _RET_IP_, c);
3418 if (unlikely(!p[i]))
3419 goto error;
3420
3421 c = this_cpu_ptr(s->cpu_slab);
3422 maybe_wipe_obj_freeptr(s, p[i]);
3423
3424 continue; /* goto for-loop */
3425 }
3426 c->freelist = get_freepointer(s, object);
3427 p[i] = object;
3428 maybe_wipe_obj_freeptr(s, p[i]);
3429 }
3430 c->tid = next_tid(c->tid);
3431 local_irq_enable();
3432
3433 /*
3434 * memcg and kmem_cache debug support and memory initialization.
3435 * Done outside of the IRQ disabled fastpath loop.
3436 */
3437 slab_post_alloc_hook(s, objcg, flags, size, p,
3438 slab_want_init_on_alloc(flags, s));
3439 return i;
3440 error:
3441 local_irq_enable();
3442 slab_post_alloc_hook(s, objcg, flags, i, p, false);
3443 __kmem_cache_free_bulk(s, i, p);
3444 return 0;
3445 }
3446 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3447
3448
3449 /*
3450 * Object placement in a slab is made very easy because we always start at
3451 * offset 0. If we tune the size of the object to the alignment then we can
3452 * get the required alignment by putting one properly sized object after
3453 * another.
3454 *
3455 * Notice that the allocation order determines the sizes of the per cpu
3456 * caches. Each processor always has one slab available for allocations.
3457 * Increasing the allocation order reduces the number of times that slabs
3458 * must be moved on and off the partial lists and is therefore a factor in
3459 * locking overhead.
3460 */
3461
3462 /*
3463 * Minimum / Maximum order of slab pages. This influences locking overhead
3464 * and slab fragmentation. A higher order reduces the number of partial slabs
3465 * and increases the number of allocations possible without having to
3466 * take the list_lock.
3467 */
3468 static unsigned int slub_min_order;
3469 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3470 static unsigned int slub_min_objects;
3471
3472 /*
3473 * Calculate the order of allocation given a slab object size.
3474 *
3475 * The order of allocation has a significant impact on performance and other
3476 * system components. Generally order 0 allocations should be preferred since
3477 * order 0 does not cause fragmentation in the page allocator. Larger objects
3478 * can be problematic to put into order 0 slabs because there may be too much
3479 * unused space left. We go to a higher order if more than 1/16th of the slab
3480 * would be wasted.
3481 *
3482 * In order to reach satisfactory performance we must ensure that a minimum
3483 * number of objects is in one slab. Otherwise we may generate too much
3484 * activity on the partial lists which requires taking the list_lock. This is
3485 * less of a concern for large slabs which are rarely used.
3486 *
3487 * slub_max_order specifies the order where we begin to stop considering the
3488 * number of objects in a slab as critical. If we reach slub_max_order then
3489 * we try to keep the page order as low as possible. So we accept more waste
3490 * of space in favor of a small page order.
3491 *
3492 * Higher order allocations also allow the placement of more objects in a
3493 * slab and thereby reduce object handling overhead. If the user has
3494 * requested a higher minimum order then we start with that one instead of
3495 * the smallest order which will fit the object.
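 *
 * A rough worked example of the 1/16th rule with 4KiB pages (ignoring
 * the min_objects lower bound): for a 700 byte object an order-0 slab
 * holds 5 objects and wastes 596 bytes, more than 4096/16, so
 * slab_order() keeps going; an order-1 slab holds 11 objects and wastes
 * 492 bytes, within 8192/16 = 512, so order 1 is accepted for a
 * fract_leftover of 16.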
3496 */ 3497 static inline unsigned int slab_order(unsigned int size, 3498 unsigned int min_objects, unsigned int max_order, 3499 unsigned int fract_leftover) 3500 { 3501 unsigned int min_order = slub_min_order; 3502 unsigned int order; 3503 3504 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 3505 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 3506 3507 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); 3508 order <= max_order; order++) { 3509 3510 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 3511 unsigned int rem; 3512 3513 rem = slab_size % size; 3514 3515 if (rem <= slab_size / fract_leftover) 3516 break; 3517 } 3518 3519 return order; 3520 } 3521 3522 static inline int calculate_order(unsigned int size) 3523 { 3524 unsigned int order; 3525 unsigned int min_objects; 3526 unsigned int max_objects; 3527 unsigned int nr_cpus; 3528 3529 /* 3530 * Attempt to find best configuration for a slab. This 3531 * works by first attempting to generate a layout with 3532 * the best configuration and backing off gradually. 3533 * 3534 * First we increase the acceptable waste in a slab. Then 3535 * we reduce the minimum objects required in a slab. 3536 */ 3537 min_objects = slub_min_objects; 3538 if (!min_objects) { 3539 /* 3540 * Some architectures will only update present cpus when 3541 * onlining them, so don't trust the number if it's just 1. But 3542 * we also don't want to use nr_cpu_ids always, as on some other 3543 * architectures, there can be many possible cpus, but never 3544 * onlined. Here we compromise between trying to avoid too high 3545 * order on systems that appear larger than they are, and too 3546 * low order on systems that appear smaller than they are. 3547 */ 3548 nr_cpus = num_present_cpus(); 3549 if (nr_cpus <= 1) 3550 nr_cpus = nr_cpu_ids; 3551 min_objects = 4 * (fls(nr_cpus) + 1); 3552 } 3553 max_objects = order_objects(slub_max_order, size); 3554 min_objects = min(min_objects, max_objects); 3555 3556 while (min_objects > 1) { 3557 unsigned int fraction; 3558 3559 fraction = 16; 3560 while (fraction >= 4) { 3561 order = slab_order(size, min_objects, 3562 slub_max_order, fraction); 3563 if (order <= slub_max_order) 3564 return order; 3565 fraction /= 2; 3566 } 3567 min_objects--; 3568 } 3569 3570 /* 3571 * We were unable to place multiple objects in a slab. Now 3572 * lets see if we can place a single object there. 3573 */ 3574 order = slab_order(size, 1, slub_max_order, 1); 3575 if (order <= slub_max_order) 3576 return order; 3577 3578 /* 3579 * Doh this slab cannot be placed using slub_max_order. 3580 */ 3581 order = slab_order(size, 1, MAX_ORDER, 1); 3582 if (order < MAX_ORDER) 3583 return order; 3584 return -ENOSYS; 3585 } 3586 3587 static void 3588 init_kmem_cache_node(struct kmem_cache_node *n) 3589 { 3590 n->nr_partial = 0; 3591 spin_lock_init(&n->list_lock); 3592 INIT_LIST_HEAD(&n->partial); 3593 #ifdef CONFIG_SLUB_DEBUG 3594 atomic_long_set(&n->nr_slabs, 0); 3595 atomic_long_set(&n->total_objects, 0); 3596 INIT_LIST_HEAD(&n->full); 3597 #endif 3598 } 3599 3600 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3601 { 3602 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3603 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3604 3605 /* 3606 * Must align to double word boundary for the double cmpxchg 3607 * instructions to work; see __pcpu_double_call_return_bool(). 
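 *
 * That is 16 byte alignment on 64-bit and 8 byte alignment on 32-bit,
 * which is what the 2 * sizeof(void *) argument below requests.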
3608 */ 3609 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3610 2 * sizeof(void *)); 3611 3612 if (!s->cpu_slab) 3613 return 0; 3614 3615 init_kmem_cache_cpus(s); 3616 3617 return 1; 3618 } 3619 3620 static struct kmem_cache *kmem_cache_node; 3621 3622 /* 3623 * No kmalloc_node yet so do it by hand. We know that this is the first 3624 * slab on the node for this slabcache. There are no concurrent accesses 3625 * possible. 3626 * 3627 * Note that this function only works on the kmem_cache_node 3628 * when allocating for the kmem_cache_node. This is used for bootstrapping 3629 * memory on a fresh node that has no slab structures yet. 3630 */ 3631 static void early_kmem_cache_node_alloc(int node) 3632 { 3633 struct page *page; 3634 struct kmem_cache_node *n; 3635 3636 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3637 3638 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3639 3640 BUG_ON(!page); 3641 if (page_to_nid(page) != node) { 3642 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3643 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3644 } 3645 3646 n = page->freelist; 3647 BUG_ON(!n); 3648 #ifdef CONFIG_SLUB_DEBUG 3649 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3650 init_tracking(kmem_cache_node, n); 3651 #endif 3652 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 3653 page->freelist = get_freepointer(kmem_cache_node, n); 3654 page->inuse = 1; 3655 page->frozen = 0; 3656 kmem_cache_node->node[node] = n; 3657 init_kmem_cache_node(n); 3658 inc_slabs_node(kmem_cache_node, node, page->objects); 3659 3660 /* 3661 * No locks need to be taken here as it has just been 3662 * initialized and there is no concurrent access. 3663 */ 3664 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3665 } 3666 3667 static void free_kmem_cache_nodes(struct kmem_cache *s) 3668 { 3669 int node; 3670 struct kmem_cache_node *n; 3671 3672 for_each_kmem_cache_node(s, node, n) { 3673 s->node[node] = NULL; 3674 kmem_cache_free(kmem_cache_node, n); 3675 } 3676 } 3677 3678 void __kmem_cache_release(struct kmem_cache *s) 3679 { 3680 cache_random_seq_destroy(s); 3681 free_percpu(s->cpu_slab); 3682 free_kmem_cache_nodes(s); 3683 } 3684 3685 static int init_kmem_cache_nodes(struct kmem_cache *s) 3686 { 3687 int node; 3688 3689 for_each_node_mask(node, slab_nodes) { 3690 struct kmem_cache_node *n; 3691 3692 if (slab_state == DOWN) { 3693 early_kmem_cache_node_alloc(node); 3694 continue; 3695 } 3696 n = kmem_cache_alloc_node(kmem_cache_node, 3697 GFP_KERNEL, node); 3698 3699 if (!n) { 3700 free_kmem_cache_nodes(s); 3701 return 0; 3702 } 3703 3704 init_kmem_cache_node(n); 3705 s->node[node] = n; 3706 } 3707 return 1; 3708 } 3709 3710 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3711 { 3712 if (min < MIN_PARTIAL) 3713 min = MIN_PARTIAL; 3714 else if (min > MAX_PARTIAL) 3715 min = MAX_PARTIAL; 3716 s->min_partial = min; 3717 } 3718 3719 static void set_cpu_partial(struct kmem_cache *s) 3720 { 3721 #ifdef CONFIG_SLUB_CPU_PARTIAL 3722 /* 3723 * cpu_partial determined the maximum number of objects kept in the 3724 * per cpu partial lists of a processor. 3725 * 3726 * Per cpu partial lists mainly contain slabs that just have one 3727 * object freed. If they are used for allocation then they can be 3728 * filled up again with minimal effort. The slab will never hit the 3729 * per node partial lists and therefore no locking will be required. 
3730 * 3731 * This setting also determines 3732 * 3733 * A) The number of objects from per cpu partial slabs dumped to the 3734 * per node list when we reach the limit. 3735 * B) The number of objects in cpu partial slabs to extract from the 3736 * per node list when we run out of per cpu objects. We only fetch 3737 * 50% to keep some capacity around for frees. 3738 */ 3739 if (!kmem_cache_has_cpu_partial(s)) 3740 slub_set_cpu_partial(s, 0); 3741 else if (s->size >= PAGE_SIZE) 3742 slub_set_cpu_partial(s, 2); 3743 else if (s->size >= 1024) 3744 slub_set_cpu_partial(s, 6); 3745 else if (s->size >= 256) 3746 slub_set_cpu_partial(s, 13); 3747 else 3748 slub_set_cpu_partial(s, 30); 3749 #endif 3750 } 3751 3752 /* 3753 * calculate_sizes() determines the order and the distribution of data within 3754 * a slab object. 3755 */ 3756 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3757 { 3758 slab_flags_t flags = s->flags; 3759 unsigned int size = s->object_size; 3760 unsigned int order; 3761 3762 /* 3763 * Round up object size to the next word boundary. We can only 3764 * place the free pointer at word boundaries and this determines 3765 * the possible location of the free pointer. 3766 */ 3767 size = ALIGN(size, sizeof(void *)); 3768 3769 #ifdef CONFIG_SLUB_DEBUG 3770 /* 3771 * Determine if we can poison the object itself. If the user of 3772 * the slab may touch the object after free or before allocation 3773 * then we should never poison the object itself. 3774 */ 3775 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 3776 !s->ctor) 3777 s->flags |= __OBJECT_POISON; 3778 else 3779 s->flags &= ~__OBJECT_POISON; 3780 3781 3782 /* 3783 * If we are Redzoning then check if there is some space between the 3784 * end of the object and the free pointer. If not then add an 3785 * additional word to have some bytes to store Redzone information. 3786 */ 3787 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 3788 size += sizeof(void *); 3789 #endif 3790 3791 /* 3792 * With that we have determined the number of bytes in actual use 3793 * by the object and redzoning. 3794 */ 3795 s->inuse = size; 3796 3797 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 3798 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 3799 s->ctor) { 3800 /* 3801 * Relocate free pointer after the object if it is not 3802 * permitted to overwrite the first word of the object on 3803 * kmem_cache_free. 3804 * 3805 * This is the case if we do RCU, have a constructor or 3806 * destructor, are poisoning the objects, or are 3807 * redzoning an object smaller than sizeof(void *). 3808 * 3809 * The assumption that s->offset >= s->inuse means free 3810 * pointer is outside of the object is used in the 3811 * freeptr_outside_object() function. If that is no 3812 * longer true, the function needs to be modified. 3813 */ 3814 s->offset = size; 3815 size += sizeof(void *); 3816 } else { 3817 /* 3818 * Store freelist pointer near middle of object to keep 3819 * it away from the edges of the object to avoid small 3820 * sized over/underflows from neighboring allocations. 3821 */ 3822 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 3823 } 3824 3825 #ifdef CONFIG_SLUB_DEBUG 3826 if (flags & SLAB_STORE_USER) 3827 /* 3828 * Need to store information about allocs and frees after 3829 * the object. 
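 * That means two struct track records appended per object, one for
 * TRACK_ALLOC and one for TRACK_FREE.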
3830 */ 3831 size += 2 * sizeof(struct track); 3832 #endif 3833 3834 kasan_cache_create(s, &size, &s->flags); 3835 #ifdef CONFIG_SLUB_DEBUG 3836 if (flags & SLAB_RED_ZONE) { 3837 /* 3838 * Add some empty padding so that we can catch 3839 * overwrites from earlier objects rather than let 3840 * tracking information or the free pointer be 3841 * corrupted if a user writes before the start 3842 * of the object. 3843 */ 3844 size += sizeof(void *); 3845 3846 s->red_left_pad = sizeof(void *); 3847 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 3848 size += s->red_left_pad; 3849 } 3850 #endif 3851 3852 /* 3853 * SLUB stores one object immediately after another beginning from 3854 * offset 0. In order to align the objects we have to simply size 3855 * each object to conform to the alignment. 3856 */ 3857 size = ALIGN(size, s->align); 3858 s->size = size; 3859 s->reciprocal_size = reciprocal_value(size); 3860 if (forced_order >= 0) 3861 order = forced_order; 3862 else 3863 order = calculate_order(size); 3864 3865 if ((int)order < 0) 3866 return 0; 3867 3868 s->allocflags = 0; 3869 if (order) 3870 s->allocflags |= __GFP_COMP; 3871 3872 if (s->flags & SLAB_CACHE_DMA) 3873 s->allocflags |= GFP_DMA; 3874 3875 if (s->flags & SLAB_CACHE_DMA32) 3876 s->allocflags |= GFP_DMA32; 3877 3878 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3879 s->allocflags |= __GFP_RECLAIMABLE; 3880 3881 /* 3882 * Determine the number of objects per slab 3883 */ 3884 s->oo = oo_make(order, size); 3885 s->min = oo_make(get_order(size), size); 3886 if (oo_objects(s->oo) > oo_objects(s->max)) 3887 s->max = s->oo; 3888 3889 return !!oo_objects(s->oo); 3890 } 3891 3892 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3893 { 3894 s->flags = kmem_cache_flags(s->size, flags, s->name); 3895 #ifdef CONFIG_SLAB_FREELIST_HARDENED 3896 s->random = get_random_long(); 3897 #endif 3898 3899 if (!calculate_sizes(s, -1)) 3900 goto error; 3901 if (disable_higher_order_debug) { 3902 /* 3903 * Disable debugging flags that store metadata if the min slab 3904 * order increased. 3905 */ 3906 if (get_order(s->size) > get_order(s->object_size)) { 3907 s->flags &= ~DEBUG_METADATA_FLAGS; 3908 s->offset = 0; 3909 if (!calculate_sizes(s, -1)) 3910 goto error; 3911 } 3912 } 3913 3914 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 3915 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 3916 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 3917 /* Enable fast mode */ 3918 s->flags |= __CMPXCHG_DOUBLE; 3919 #endif 3920 3921 /* 3922 * The larger the object size is, the more pages we want on the partial 3923 * list to avoid pounding the page allocator excessively. 
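 *
 * For example, before clamping to the MIN_PARTIAL/MAX_PARTIAL bounds:
 * a 64 byte cache computes ilog2(64)/2 = 3 and a 4KiB cache computes
 * ilog2(4096)/2 = 6, so set_min_partial() leaves them at 5 and 6
 * respectively.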
3924 */
3925 set_min_partial(s, ilog2(s->size) / 2);
3926
3927 set_cpu_partial(s);
3928
3929 #ifdef CONFIG_NUMA
3930 s->remote_node_defrag_ratio = 1000;
3931 #endif
3932
3933 /* Initialize the pre-computed randomized freelist if slab is up */
3934 if (slab_state >= UP) {
3935 if (init_cache_random_seq(s))
3936 goto error;
3937 }
3938
3939 if (!init_kmem_cache_nodes(s))
3940 goto error;
3941
3942 if (alloc_kmem_cache_cpus(s))
3943 return 0;
3944
3945 free_kmem_cache_nodes(s);
3946 error:
3947 return -EINVAL;
3948 }
3949
3950 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3951 const char *text)
3952 {
3953 #ifdef CONFIG_SLUB_DEBUG
3954 void *addr = page_address(page);
3955 unsigned long *map;
3956 void *p;
3957
3958 slab_err(s, page, text, s->name);
3959 slab_lock(page);
3960
3961 map = get_map(s, page);
3962 for_each_object(p, s, addr, page->objects) {
3963
3964 if (!test_bit(__obj_to_index(s, addr, p), map)) {
3965 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
3966 print_tracking(s, p);
3967 }
3968 }
3969 put_map(map);
3970 slab_unlock(page);
3971 #endif
3972 }
3973
3974 /*
3975 * Attempt to free all partial slabs on a node.
3976 * This is called from __kmem_cache_shutdown(). We must take list_lock
3977 * because a sysfs file might still access the partial list after shutdown.
3978 */
3979 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3980 {
3981 LIST_HEAD(discard);
3982 struct page *page, *h;
3983
3984 BUG_ON(irqs_disabled());
3985 spin_lock_irq(&n->list_lock);
3986 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3987 if (!page->inuse) {
3988 remove_partial(n, page);
3989 list_add(&page->slab_list, &discard);
3990 } else {
3991 list_slab_objects(s, page,
3992 "Objects remaining in %s on __kmem_cache_shutdown()");
3993 }
3994 }
3995 spin_unlock_irq(&n->list_lock);
3996
3997 list_for_each_entry_safe(page, h, &discard, slab_list)
3998 discard_slab(s, page);
3999 }
4000
4001 bool __kmem_cache_empty(struct kmem_cache *s)
4002 {
4003 int node;
4004 struct kmem_cache_node *n;
4005
4006 for_each_kmem_cache_node(s, node, n)
4007 if (n->nr_partial || slabs_node(s, node))
4008 return false;
4009 return true;
4010 }
4011
4012 /*
4013 * Release all resources used by a slab cache.
4014 */ 4015 int __kmem_cache_shutdown(struct kmem_cache *s) 4016 { 4017 int node; 4018 struct kmem_cache_node *n; 4019 4020 flush_all(s); 4021 /* Attempt to free all objects */ 4022 for_each_kmem_cache_node(s, node, n) { 4023 free_partial(s, n); 4024 if (n->nr_partial || slabs_node(s, node)) 4025 return 1; 4026 } 4027 return 0; 4028 } 4029 4030 #ifdef CONFIG_PRINTK 4031 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) 4032 { 4033 void *base; 4034 int __maybe_unused i; 4035 unsigned int objnr; 4036 void *objp; 4037 void *objp0; 4038 struct kmem_cache *s = page->slab_cache; 4039 struct track __maybe_unused *trackp; 4040 4041 kpp->kp_ptr = object; 4042 kpp->kp_page = page; 4043 kpp->kp_slab_cache = s; 4044 base = page_address(page); 4045 objp0 = kasan_reset_tag(object); 4046 #ifdef CONFIG_SLUB_DEBUG 4047 objp = restore_red_left(s, objp0); 4048 #else 4049 objp = objp0; 4050 #endif 4051 objnr = obj_to_index(s, page, objp); 4052 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 4053 objp = base + s->size * objnr; 4054 kpp->kp_objp = objp; 4055 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || 4056 !(s->flags & SLAB_STORE_USER)) 4057 return; 4058 #ifdef CONFIG_SLUB_DEBUG 4059 objp = fixup_red_left(s, objp); 4060 trackp = get_track(s, objp, TRACK_ALLOC); 4061 kpp->kp_ret = (void *)trackp->addr; 4062 #ifdef CONFIG_STACKDEPOT 4063 { 4064 depot_stack_handle_t handle; 4065 unsigned long *entries; 4066 unsigned int nr_entries; 4067 4068 handle = READ_ONCE(trackp->handle); 4069 if (handle) { 4070 nr_entries = stack_depot_fetch(handle, &entries); 4071 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4072 kpp->kp_stack[i] = (void *)entries[i]; 4073 } 4074 4075 trackp = get_track(s, objp, TRACK_FREE); 4076 handle = READ_ONCE(trackp->handle); 4077 if (handle) { 4078 nr_entries = stack_depot_fetch(handle, &entries); 4079 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 4080 kpp->kp_free_stack[i] = (void *)entries[i]; 4081 } 4082 } 4083 #endif 4084 #endif 4085 } 4086 #endif 4087 4088 /******************************************************************** 4089 * Kmalloc subsystem 4090 *******************************************************************/ 4091 4092 static int __init setup_slub_min_order(char *str) 4093 { 4094 get_option(&str, (int *)&slub_min_order); 4095 4096 return 1; 4097 } 4098 4099 __setup("slub_min_order=", setup_slub_min_order); 4100 4101 static int __init setup_slub_max_order(char *str) 4102 { 4103 get_option(&str, (int *)&slub_max_order); 4104 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4105 4106 return 1; 4107 } 4108 4109 __setup("slub_max_order=", setup_slub_max_order); 4110 4111 static int __init setup_slub_min_objects(char *str) 4112 { 4113 get_option(&str, (int *)&slub_min_objects); 4114 4115 return 1; 4116 } 4117 4118 __setup("slub_min_objects=", setup_slub_min_objects); 4119 4120 void *__kmalloc(size_t size, gfp_t flags) 4121 { 4122 struct kmem_cache *s; 4123 void *ret; 4124 4125 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4126 return kmalloc_large(size, flags); 4127 4128 s = kmalloc_slab(size, flags); 4129 4130 if (unlikely(ZERO_OR_NULL_PTR(s))) 4131 return s; 4132 4133 ret = slab_alloc(s, flags, _RET_IP_, size); 4134 4135 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 4136 4137 ret = kasan_kmalloc(s, ret, size, flags); 4138 4139 return ret; 4140 } 4141 EXPORT_SYMBOL(__kmalloc); 4142 4143 #ifdef CONFIG_NUMA 4144 static void 
*kmalloc_large_node(size_t size, gfp_t flags, int node)
4145 {
4146 struct page *page;
4147 void *ptr = NULL;
4148 unsigned int order = get_order(size);
4149
4150 flags |= __GFP_COMP;
4151 page = alloc_pages_node(node, flags, order);
4152 if (page) {
4153 ptr = page_address(page);
4154 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4155 PAGE_SIZE << order);
4156 }
4157
4158 return kmalloc_large_node_hook(ptr, size, flags);
4159 }
4160
4161 void *__kmalloc_node(size_t size, gfp_t flags, int node)
4162 {
4163 struct kmem_cache *s;
4164 void *ret;
4165
4166 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4167 ret = kmalloc_large_node(size, flags, node);
4168
4169 trace_kmalloc_node(_RET_IP_, ret,
4170 size, PAGE_SIZE << get_order(size),
4171 flags, node);
4172
4173 return ret;
4174 }
4175
4176 s = kmalloc_slab(size, flags);
4177
4178 if (unlikely(ZERO_OR_NULL_PTR(s)))
4179 return s;
4180
4181 ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
4182
4183 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4184
4185 ret = kasan_kmalloc(s, ret, size, flags);
4186
4187 return ret;
4188 }
4189 EXPORT_SYMBOL(__kmalloc_node);
4190 #endif /* CONFIG_NUMA */
4191
4192 #ifdef CONFIG_HARDENED_USERCOPY
4193 /*
4194 * Rejects incorrectly sized objects and objects that are to be copied
4195 * to/from userspace but do not fall entirely within the containing slab
4196 * cache's usercopy region.
4197 *
4198 * Does not return a value: a failed check aborts via usercopy_abort(),
4199 * naming the offending cache, while a permitted copy simply returns.
4200 */
4201 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4202 bool to_user)
4203 {
4204 struct kmem_cache *s;
4205 unsigned int offset;
4206 size_t object_size;
4207 bool is_kfence = is_kfence_address(ptr);
4208
4209 ptr = kasan_reset_tag(ptr);
4210
4211 /* Find object and usable object size. */
4212 s = page->slab_cache;
4213
4214 /* Reject impossible pointers. */
4215 if (ptr < page_address(page))
4216 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4217 to_user, 0, n);
4218
4219 /* Find offset within object. */
4220 if (is_kfence)
4221 offset = ptr - kfence_object_start(ptr);
4222 else
4223 offset = (ptr - page_address(page)) % s->size;
4224
4225 /* Adjust for redzone and reject if within the redzone. */
4226 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4227 if (offset < s->red_left_pad)
4228 usercopy_abort("SLUB object in left red zone",
4229 s->name, to_user, offset, n);
4230 offset -= s->red_left_pad;
4231 }
4232
4233 /* Allow address range falling entirely within usercopy region. */
4234 if (offset >= s->useroffset &&
4235 offset - s->useroffset <= s->usersize &&
4236 n <= s->useroffset - offset + s->usersize)
4237 return;
4238
4239 /*
4240 * If the copy is still within the allocated object, produce
4241 * a warning instead of rejecting the copy. This is intended
4242 * to be a temporary method to find any missing usercopy
4243 * whitelists.
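 *
 * Illustrative numbers (not from the source): with useroffset == 16 and
 * usersize == 32, a copy of 24 bytes at offset 20 falls inside the
 * whitelisted window and is allowed outright, while a copy of 64 bytes
 * at offset 20 overruns the window and, if it still fits within the
 * object and usercopy_fallback is set, only triggers the warning below.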
4244 */ 4245 object_size = slab_ksize(s); 4246 if (usercopy_fallback && 4247 offset <= object_size && n <= object_size - offset) { 4248 usercopy_warn("SLUB object", s->name, to_user, offset, n); 4249 return; 4250 } 4251 4252 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4253 } 4254 #endif /* CONFIG_HARDENED_USERCOPY */ 4255 4256 size_t __ksize(const void *object) 4257 { 4258 struct page *page; 4259 4260 if (unlikely(object == ZERO_SIZE_PTR)) 4261 return 0; 4262 4263 page = virt_to_head_page(object); 4264 4265 if (unlikely(!PageSlab(page))) { 4266 WARN_ON(!PageCompound(page)); 4267 return page_size(page); 4268 } 4269 4270 return slab_ksize(page->slab_cache); 4271 } 4272 EXPORT_SYMBOL(__ksize); 4273 4274 void kfree(const void *x) 4275 { 4276 struct page *page; 4277 void *object = (void *)x; 4278 4279 trace_kfree(_RET_IP_, x); 4280 4281 if (unlikely(ZERO_OR_NULL_PTR(x))) 4282 return; 4283 4284 page = virt_to_head_page(x); 4285 if (unlikely(!PageSlab(page))) { 4286 unsigned int order = compound_order(page); 4287 4288 BUG_ON(!PageCompound(page)); 4289 kfree_hook(object); 4290 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4291 -(PAGE_SIZE << order)); 4292 __free_pages(page, order); 4293 return; 4294 } 4295 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 4296 } 4297 EXPORT_SYMBOL(kfree); 4298 4299 #define SHRINK_PROMOTE_MAX 32 4300 4301 /* 4302 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4303 * up most to the head of the partial lists. New allocations will then 4304 * fill those up and thus they can be removed from the partial lists. 4305 * 4306 * The slabs with the least items are placed last. This results in them 4307 * being allocated from last increasing the chance that the last objects 4308 * are freed in them. 4309 */ 4310 int __kmem_cache_shrink(struct kmem_cache *s) 4311 { 4312 int node; 4313 int i; 4314 struct kmem_cache_node *n; 4315 struct page *page; 4316 struct page *t; 4317 struct list_head discard; 4318 struct list_head promote[SHRINK_PROMOTE_MAX]; 4319 unsigned long flags; 4320 int ret = 0; 4321 4322 flush_all(s); 4323 for_each_kmem_cache_node(s, node, n) { 4324 INIT_LIST_HEAD(&discard); 4325 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4326 INIT_LIST_HEAD(promote + i); 4327 4328 spin_lock_irqsave(&n->list_lock, flags); 4329 4330 /* 4331 * Build lists of slabs to discard or promote. 4332 * 4333 * Note that concurrent frees may occur while we hold the 4334 * list_lock. page->inuse here is the upper limit. 4335 */ 4336 list_for_each_entry_safe(page, t, &n->partial, slab_list) { 4337 int free = page->objects - page->inuse; 4338 4339 /* Do not reread page->inuse */ 4340 barrier(); 4341 4342 /* We do not keep full slabs on the list */ 4343 BUG_ON(free <= 0); 4344 4345 if (free == page->objects) { 4346 list_move(&page->slab_list, &discard); 4347 n->nr_partial--; 4348 } else if (free <= SHRINK_PROMOTE_MAX) 4349 list_move(&page->slab_list, promote + free - 1); 4350 } 4351 4352 /* 4353 * Promote the slabs filled up most to the head of the 4354 * partial list. 
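 *
 * For example (illustrative): a slab with a single free object was put
 * on promote[0] above; because the splice below walks the buckets from
 * the emptiest down to the fullest, that slab is spliced last and so
 * ends up closest to the head, where new allocations will hit it first.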
4355 */
4356 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4357 list_splice(promote + i, &n->partial);
4358
4359 spin_unlock_irqrestore(&n->list_lock, flags);
4360
4361 /* Release empty slabs */
4362 list_for_each_entry_safe(page, t, &discard, slab_list)
4363 discard_slab(s, page);
4364
4365 if (slabs_node(s, node))
4366 ret = 1;
4367 }
4368
4369 return ret;
4370 }
4371
4372 static int slab_mem_going_offline_callback(void *arg)
4373 {
4374 struct kmem_cache *s;
4375
4376 mutex_lock(&slab_mutex);
4377 list_for_each_entry(s, &slab_caches, list)
4378 __kmem_cache_shrink(s);
4379 mutex_unlock(&slab_mutex);
4380
4381 return 0;
4382 }
4383
4384 static void slab_mem_offline_callback(void *arg)
4385 {
4386 struct memory_notify *marg = arg;
4387 int offline_node;
4388
4389 offline_node = marg->status_change_nid_normal;
4390
4391 /*
4392 * If the node still has available memory, we still need its
4393 * kmem_cache_node, so there is nothing to do here.
4394 */
4395 if (offline_node < 0)
4396 return;
4397
4398 mutex_lock(&slab_mutex);
4399 node_clear(offline_node, slab_nodes);
4400 /*
4401 * We no longer free kmem_cache_node structures here, as it would be
4402 * racy with all get_node() users, and infeasible to protect them with
4403 * slab_mutex.
4404 */
4405 mutex_unlock(&slab_mutex);
4406 }
4407
4408 static int slab_mem_going_online_callback(void *arg)
4409 {
4410 struct kmem_cache_node *n;
4411 struct kmem_cache *s;
4412 struct memory_notify *marg = arg;
4413 int nid = marg->status_change_nid_normal;
4414 int ret = 0;
4415
4416 /*
4417 * If the node's memory is already available, then kmem_cache_node is
4418 * already created. Nothing to do.
4419 */
4420 if (nid < 0)
4421 return 0;
4422
4423 /*
4424 * We are bringing a node online. No memory is available yet. We must
4425 * allocate a kmem_cache_node structure in order to bring the node
4426 * online.
4427 */
4428 mutex_lock(&slab_mutex);
4429 list_for_each_entry(s, &slab_caches, list) {
4430 /*
4431 * The structure may already exist if the node was previously
4432 * onlined and offlined.
4433 */
4434 if (get_node(s, nid))
4435 continue;
4436 /*
4437 * XXX: kmem_cache_alloc_node will fall back to other nodes
4438 * since memory is not yet available from the node that
4439 * is brought up.
4440 */
4441 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4442 if (!n) {
4443 ret = -ENOMEM;
4444 goto out;
4445 }
4446 init_kmem_cache_node(n);
4447 s->node[nid] = n;
4448 }
4449 /*
4450 * Any cache created after this point will also have kmem_cache_node
4451 * initialized for the new node.
4452 */ 4453 node_set(nid, slab_nodes); 4454 out: 4455 mutex_unlock(&slab_mutex); 4456 return ret; 4457 } 4458 4459 static int slab_memory_callback(struct notifier_block *self, 4460 unsigned long action, void *arg) 4461 { 4462 int ret = 0; 4463 4464 switch (action) { 4465 case MEM_GOING_ONLINE: 4466 ret = slab_mem_going_online_callback(arg); 4467 break; 4468 case MEM_GOING_OFFLINE: 4469 ret = slab_mem_going_offline_callback(arg); 4470 break; 4471 case MEM_OFFLINE: 4472 case MEM_CANCEL_ONLINE: 4473 slab_mem_offline_callback(arg); 4474 break; 4475 case MEM_ONLINE: 4476 case MEM_CANCEL_OFFLINE: 4477 break; 4478 } 4479 if (ret) 4480 ret = notifier_from_errno(ret); 4481 else 4482 ret = NOTIFY_OK; 4483 return ret; 4484 } 4485 4486 static struct notifier_block slab_memory_callback_nb = { 4487 .notifier_call = slab_memory_callback, 4488 .priority = SLAB_CALLBACK_PRI, 4489 }; 4490 4491 /******************************************************************** 4492 * Basic setup of slabs 4493 *******************************************************************/ 4494 4495 /* 4496 * Used for early kmem_cache structures that were allocated using 4497 * the page allocator. Allocate them properly then fix up the pointers 4498 * that may be pointing to the wrong kmem_cache structure. 4499 */ 4500 4501 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4502 { 4503 int node; 4504 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4505 struct kmem_cache_node *n; 4506 4507 memcpy(s, static_cache, kmem_cache->object_size); 4508 4509 /* 4510 * This runs very early, and only the boot processor is supposed to be 4511 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4512 * IPIs around. 4513 */ 4514 __flush_cpu_slab(s, smp_processor_id()); 4515 for_each_kmem_cache_node(s, node, n) { 4516 struct page *p; 4517 4518 list_for_each_entry(p, &n->partial, slab_list) 4519 p->slab_cache = s; 4520 4521 #ifdef CONFIG_SLUB_DEBUG 4522 list_for_each_entry(p, &n->full, slab_list) 4523 p->slab_cache = s; 4524 #endif 4525 } 4526 list_add(&s->list, &slab_caches); 4527 return s; 4528 } 4529 4530 void __init kmem_cache_init(void) 4531 { 4532 static __initdata struct kmem_cache boot_kmem_cache, 4533 boot_kmem_cache_node; 4534 int node; 4535 4536 if (debug_guardpage_minorder()) 4537 slub_max_order = 0; 4538 4539 /* Print slub debugging pointers without hashing */ 4540 if (__slub_debug_enabled()) 4541 no_hash_pointers_enable(NULL); 4542 4543 kmem_cache_node = &boot_kmem_cache_node; 4544 kmem_cache = &boot_kmem_cache; 4545 4546 /* 4547 * Initialize the nodemask for which we will allocate per node 4548 * structures. Here we don't need taking slab_mutex yet. 
4549 */ 4550 for_each_node_state(node, N_NORMAL_MEMORY) 4551 node_set(node, slab_nodes); 4552 4553 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4554 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4555 4556 register_hotmemory_notifier(&slab_memory_callback_nb); 4557 4558 /* Able to allocate the per node structures */ 4559 slab_state = PARTIAL; 4560 4561 create_boot_cache(kmem_cache, "kmem_cache", 4562 offsetof(struct kmem_cache, node) + 4563 nr_node_ids * sizeof(struct kmem_cache_node *), 4564 SLAB_HWCACHE_ALIGN, 0, 0); 4565 4566 kmem_cache = bootstrap(&boot_kmem_cache); 4567 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4568 4569 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4570 setup_kmalloc_cache_index_table(); 4571 create_kmalloc_caches(0); 4572 4573 /* Setup random freelists for each cache */ 4574 init_freelist_randomization(); 4575 4576 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4577 slub_cpu_dead); 4578 4579 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4580 cache_line_size(), 4581 slub_min_order, slub_max_order, slub_min_objects, 4582 nr_cpu_ids, nr_node_ids); 4583 } 4584 4585 void __init kmem_cache_init_late(void) 4586 { 4587 } 4588 4589 struct kmem_cache * 4590 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4591 slab_flags_t flags, void (*ctor)(void *)) 4592 { 4593 struct kmem_cache *s; 4594 4595 s = find_mergeable(size, align, flags, name, ctor); 4596 if (s) { 4597 s->refcount++; 4598 4599 /* 4600 * Adjust the object sizes so that we clear 4601 * the complete object on kzalloc. 4602 */ 4603 s->object_size = max(s->object_size, size); 4604 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4605 4606 if (sysfs_slab_alias(s, name)) { 4607 s->refcount--; 4608 s = NULL; 4609 } 4610 } 4611 4612 return s; 4613 } 4614 4615 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4616 { 4617 int err; 4618 4619 err = kmem_cache_open(s, flags); 4620 if (err) 4621 return err; 4622 4623 /* Mutex is not taken during early boot */ 4624 if (slab_state <= UP) 4625 return 0; 4626 4627 err = sysfs_slab_add(s); 4628 if (err) 4629 __kmem_cache_release(s); 4630 4631 if (s->flags & SLAB_STORE_USER) 4632 debugfs_slab_add(s); 4633 4634 return err; 4635 } 4636 4637 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4638 { 4639 struct kmem_cache *s; 4640 void *ret; 4641 4642 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4643 return kmalloc_large(size, gfpflags); 4644 4645 s = kmalloc_slab(size, gfpflags); 4646 4647 if (unlikely(ZERO_OR_NULL_PTR(s))) 4648 return s; 4649 4650 ret = slab_alloc(s, gfpflags, caller, size); 4651 4652 /* Honor the call site pointer we received. */ 4653 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4654 4655 return ret; 4656 } 4657 EXPORT_SYMBOL(__kmalloc_track_caller); 4658 4659 #ifdef CONFIG_NUMA 4660 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4661 int node, unsigned long caller) 4662 { 4663 struct kmem_cache *s; 4664 void *ret; 4665 4666 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4667 ret = kmalloc_large_node(size, gfpflags, node); 4668 4669 trace_kmalloc_node(caller, ret, 4670 size, PAGE_SIZE << get_order(size), 4671 gfpflags, node); 4672 4673 return ret; 4674 } 4675 4676 s = kmalloc_slab(size, gfpflags); 4677 4678 if (unlikely(ZERO_OR_NULL_PTR(s))) 4679 return s; 4680 4681 ret = slab_alloc_node(s, gfpflags, node, caller, size); 4682 4683 /* Honor the call site pointer we received. 
*/ 4684 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4685 4686 return ret; 4687 } 4688 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4689 #endif 4690 4691 #ifdef CONFIG_SYSFS 4692 static int count_inuse(struct page *page) 4693 { 4694 return page->inuse; 4695 } 4696 4697 static int count_total(struct page *page) 4698 { 4699 return page->objects; 4700 } 4701 #endif 4702 4703 #ifdef CONFIG_SLUB_DEBUG 4704 static void validate_slab(struct kmem_cache *s, struct page *page) 4705 { 4706 void *p; 4707 void *addr = page_address(page); 4708 unsigned long *map; 4709 4710 slab_lock(page); 4711 4712 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) 4713 goto unlock; 4714 4715 /* Now we know that a valid freelist exists */ 4716 map = get_map(s, page); 4717 for_each_object(p, s, addr, page->objects) { 4718 u8 val = test_bit(__obj_to_index(s, addr, p), map) ? 4719 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4720 4721 if (!check_object(s, page, p, val)) 4722 break; 4723 } 4724 put_map(map); 4725 unlock: 4726 slab_unlock(page); 4727 } 4728 4729 static int validate_slab_node(struct kmem_cache *s, 4730 struct kmem_cache_node *n) 4731 { 4732 unsigned long count = 0; 4733 struct page *page; 4734 unsigned long flags; 4735 4736 spin_lock_irqsave(&n->list_lock, flags); 4737 4738 list_for_each_entry(page, &n->partial, slab_list) { 4739 validate_slab(s, page); 4740 count++; 4741 } 4742 if (count != n->nr_partial) { 4743 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4744 s->name, count, n->nr_partial); 4745 slab_add_kunit_errors(); 4746 } 4747 4748 if (!(s->flags & SLAB_STORE_USER)) 4749 goto out; 4750 4751 list_for_each_entry(page, &n->full, slab_list) { 4752 validate_slab(s, page); 4753 count++; 4754 } 4755 if (count != atomic_long_read(&n->nr_slabs)) { 4756 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4757 s->name, count, atomic_long_read(&n->nr_slabs)); 4758 slab_add_kunit_errors(); 4759 } 4760 4761 out: 4762 spin_unlock_irqrestore(&n->list_lock, flags); 4763 return count; 4764 } 4765 4766 long validate_slab_cache(struct kmem_cache *s) 4767 { 4768 int node; 4769 unsigned long count = 0; 4770 struct kmem_cache_node *n; 4771 4772 flush_all(s); 4773 for_each_kmem_cache_node(s, node, n) 4774 count += validate_slab_node(s, n); 4775 4776 return count; 4777 } 4778 EXPORT_SYMBOL(validate_slab_cache); 4779 4780 #ifdef CONFIG_DEBUG_FS 4781 /* 4782 * Generate lists of code addresses where slabcache objects are allocated 4783 * and freed. 
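 *
 * Each entry ends up as one line in the debugfs alloc_traces/free_traces
 * files, roughly of the form (values below are made up):
 *
 *     142 kmem_cache_alloc+0x1a4/0x2f0 age=3/1204/9512 pid=1-213 cpus=0-3 nodes=0
 *
 * i.e. hit count, call site, min/avg/max object age, pid range, and the
 * cpus/nodes the tracked allocations came from.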
4784 */ 4785 4786 struct location { 4787 unsigned long count; 4788 unsigned long addr; 4789 long long sum_time; 4790 long min_time; 4791 long max_time; 4792 long min_pid; 4793 long max_pid; 4794 DECLARE_BITMAP(cpus, NR_CPUS); 4795 nodemask_t nodes; 4796 }; 4797 4798 struct loc_track { 4799 unsigned long max; 4800 unsigned long count; 4801 struct location *loc; 4802 }; 4803 4804 static struct dentry *slab_debugfs_root; 4805 4806 static void free_loc_track(struct loc_track *t) 4807 { 4808 if (t->max) 4809 free_pages((unsigned long)t->loc, 4810 get_order(sizeof(struct location) * t->max)); 4811 } 4812 4813 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4814 { 4815 struct location *l; 4816 int order; 4817 4818 order = get_order(sizeof(struct location) * max); 4819 4820 l = (void *)__get_free_pages(flags, order); 4821 if (!l) 4822 return 0; 4823 4824 if (t->count) { 4825 memcpy(l, t->loc, sizeof(struct location) * t->count); 4826 free_loc_track(t); 4827 } 4828 t->max = max; 4829 t->loc = l; 4830 return 1; 4831 } 4832 4833 static int add_location(struct loc_track *t, struct kmem_cache *s, 4834 const struct track *track) 4835 { 4836 long start, end, pos; 4837 struct location *l; 4838 unsigned long caddr; 4839 unsigned long age = jiffies - track->when; 4840 4841 start = -1; 4842 end = t->count; 4843 4844 for ( ; ; ) { 4845 pos = start + (end - start + 1) / 2; 4846 4847 /* 4848 * There is nothing at "end". If we end up there 4849 * we need to add something to before end. 4850 */ 4851 if (pos == end) 4852 break; 4853 4854 caddr = t->loc[pos].addr; 4855 if (track->addr == caddr) { 4856 4857 l = &t->loc[pos]; 4858 l->count++; 4859 if (track->when) { 4860 l->sum_time += age; 4861 if (age < l->min_time) 4862 l->min_time = age; 4863 if (age > l->max_time) 4864 l->max_time = age; 4865 4866 if (track->pid < l->min_pid) 4867 l->min_pid = track->pid; 4868 if (track->pid > l->max_pid) 4869 l->max_pid = track->pid; 4870 4871 cpumask_set_cpu(track->cpu, 4872 to_cpumask(l->cpus)); 4873 } 4874 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4875 return 1; 4876 } 4877 4878 if (track->addr < caddr) 4879 end = pos; 4880 else 4881 start = pos; 4882 } 4883 4884 /* 4885 * Not found. Insert new tracking element. 
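 *
 * At this point the binary search above has left "pos" at the slot where
 * the new address sorts, so (illustratively) inserting address B into the
 * tracked list [A, C, E] shifts C and E up by one and writes B at index 1,
 * keeping the array ordered by call-site address.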
4886 */ 4887 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4888 return 0; 4889 4890 l = t->loc + pos; 4891 if (pos < t->count) 4892 memmove(l + 1, l, 4893 (t->count - pos) * sizeof(struct location)); 4894 t->count++; 4895 l->count = 1; 4896 l->addr = track->addr; 4897 l->sum_time = age; 4898 l->min_time = age; 4899 l->max_time = age; 4900 l->min_pid = track->pid; 4901 l->max_pid = track->pid; 4902 cpumask_clear(to_cpumask(l->cpus)); 4903 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4904 nodes_clear(l->nodes); 4905 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4906 return 1; 4907 } 4908 4909 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4910 struct page *page, enum track_item alloc) 4911 { 4912 void *addr = page_address(page); 4913 void *p; 4914 unsigned long *map; 4915 4916 map = get_map(s, page); 4917 for_each_object(p, s, addr, page->objects) 4918 if (!test_bit(__obj_to_index(s, addr, p), map)) 4919 add_location(t, s, get_track(s, p, alloc)); 4920 put_map(map); 4921 } 4922 #endif /* CONFIG_DEBUG_FS */ 4923 #endif /* CONFIG_SLUB_DEBUG */ 4924 4925 #ifdef CONFIG_SYSFS 4926 enum slab_stat_type { 4927 SL_ALL, /* All slabs */ 4928 SL_PARTIAL, /* Only partially allocated slabs */ 4929 SL_CPU, /* Only slabs used for cpu caches */ 4930 SL_OBJECTS, /* Determine allocated objects not slabs */ 4931 SL_TOTAL /* Determine object capacity not slabs */ 4932 }; 4933 4934 #define SO_ALL (1 << SL_ALL) 4935 #define SO_PARTIAL (1 << SL_PARTIAL) 4936 #define SO_CPU (1 << SL_CPU) 4937 #define SO_OBJECTS (1 << SL_OBJECTS) 4938 #define SO_TOTAL (1 << SL_TOTAL) 4939 4940 static ssize_t show_slab_objects(struct kmem_cache *s, 4941 char *buf, unsigned long flags) 4942 { 4943 unsigned long total = 0; 4944 int node; 4945 int x; 4946 unsigned long *nodes; 4947 int len = 0; 4948 4949 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 4950 if (!nodes) 4951 return -ENOMEM; 4952 4953 if (flags & SO_CPU) { 4954 int cpu; 4955 4956 for_each_possible_cpu(cpu) { 4957 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 4958 cpu); 4959 int node; 4960 struct page *page; 4961 4962 page = READ_ONCE(c->page); 4963 if (!page) 4964 continue; 4965 4966 node = page_to_nid(page); 4967 if (flags & SO_TOTAL) 4968 x = page->objects; 4969 else if (flags & SO_OBJECTS) 4970 x = page->inuse; 4971 else 4972 x = 1; 4973 4974 total += x; 4975 nodes[node] += x; 4976 4977 page = slub_percpu_partial_read_once(c); 4978 if (page) { 4979 node = page_to_nid(page); 4980 if (flags & SO_TOTAL) 4981 WARN_ON_ONCE(1); 4982 else if (flags & SO_OBJECTS) 4983 WARN_ON_ONCE(1); 4984 else 4985 x = page->pages; 4986 total += x; 4987 nodes[node] += x; 4988 } 4989 } 4990 } 4991 4992 /* 4993 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 4994 * already held which will conflict with an existing lock order: 4995 * 4996 * mem_hotplug_lock->slab_mutex->kernfs_mutex 4997 * 4998 * We don't really need mem_hotplug_lock (to hold off 4999 * slab_mem_going_offline_callback) here because slab's memory hot 5000 * unplug code doesn't destroy the kmem_cache->node[] data. 
5001 */ 5002 5003 #ifdef CONFIG_SLUB_DEBUG 5004 if (flags & SO_ALL) { 5005 struct kmem_cache_node *n; 5006 5007 for_each_kmem_cache_node(s, node, n) { 5008 5009 if (flags & SO_TOTAL) 5010 x = atomic_long_read(&n->total_objects); 5011 else if (flags & SO_OBJECTS) 5012 x = atomic_long_read(&n->total_objects) - 5013 count_partial(n, count_free); 5014 else 5015 x = atomic_long_read(&n->nr_slabs); 5016 total += x; 5017 nodes[node] += x; 5018 } 5019 5020 } else 5021 #endif 5022 if (flags & SO_PARTIAL) { 5023 struct kmem_cache_node *n; 5024 5025 for_each_kmem_cache_node(s, node, n) { 5026 if (flags & SO_TOTAL) 5027 x = count_partial(n, count_total); 5028 else if (flags & SO_OBJECTS) 5029 x = count_partial(n, count_inuse); 5030 else 5031 x = n->nr_partial; 5032 total += x; 5033 nodes[node] += x; 5034 } 5035 } 5036 5037 len += sysfs_emit_at(buf, len, "%lu", total); 5038 #ifdef CONFIG_NUMA 5039 for (node = 0; node < nr_node_ids; node++) { 5040 if (nodes[node]) 5041 len += sysfs_emit_at(buf, len, " N%d=%lu", 5042 node, nodes[node]); 5043 } 5044 #endif 5045 len += sysfs_emit_at(buf, len, "\n"); 5046 kfree(nodes); 5047 5048 return len; 5049 } 5050 5051 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5052 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5053 5054 struct slab_attribute { 5055 struct attribute attr; 5056 ssize_t (*show)(struct kmem_cache *s, char *buf); 5057 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5058 }; 5059 5060 #define SLAB_ATTR_RO(_name) \ 5061 static struct slab_attribute _name##_attr = \ 5062 __ATTR(_name, 0400, _name##_show, NULL) 5063 5064 #define SLAB_ATTR(_name) \ 5065 static struct slab_attribute _name##_attr = \ 5066 __ATTR(_name, 0600, _name##_show, _name##_store) 5067 5068 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5069 { 5070 return sysfs_emit(buf, "%u\n", s->size); 5071 } 5072 SLAB_ATTR_RO(slab_size); 5073 5074 static ssize_t align_show(struct kmem_cache *s, char *buf) 5075 { 5076 return sysfs_emit(buf, "%u\n", s->align); 5077 } 5078 SLAB_ATTR_RO(align); 5079 5080 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5081 { 5082 return sysfs_emit(buf, "%u\n", s->object_size); 5083 } 5084 SLAB_ATTR_RO(object_size); 5085 5086 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5087 { 5088 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5089 } 5090 SLAB_ATTR_RO(objs_per_slab); 5091 5092 static ssize_t order_show(struct kmem_cache *s, char *buf) 5093 { 5094 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5095 } 5096 SLAB_ATTR_RO(order); 5097 5098 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5099 { 5100 return sysfs_emit(buf, "%lu\n", s->min_partial); 5101 } 5102 5103 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5104 size_t length) 5105 { 5106 unsigned long min; 5107 int err; 5108 5109 err = kstrtoul(buf, 10, &min); 5110 if (err) 5111 return err; 5112 5113 set_min_partial(s, min); 5114 return length; 5115 } 5116 SLAB_ATTR(min_partial); 5117 5118 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5119 { 5120 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s)); 5121 } 5122 5123 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5124 size_t length) 5125 { 5126 unsigned int objects; 5127 int err; 5128 5129 err = kstrtouint(buf, 10, &objects); 5130 if (err) 5131 return err; 5132 if (objects && !kmem_cache_has_cpu_partial(s)) 5133 return -EINVAL; 5134 5135 slub_set_cpu_partial(s, objects); 
5136 flush_all(s); 5137 return length; 5138 } 5139 SLAB_ATTR(cpu_partial); 5140 5141 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5142 { 5143 if (!s->ctor) 5144 return 0; 5145 return sysfs_emit(buf, "%pS\n", s->ctor); 5146 } 5147 SLAB_ATTR_RO(ctor); 5148 5149 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5150 { 5151 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5152 } 5153 SLAB_ATTR_RO(aliases); 5154 5155 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5156 { 5157 return show_slab_objects(s, buf, SO_PARTIAL); 5158 } 5159 SLAB_ATTR_RO(partial); 5160 5161 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5162 { 5163 return show_slab_objects(s, buf, SO_CPU); 5164 } 5165 SLAB_ATTR_RO(cpu_slabs); 5166 5167 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5168 { 5169 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5170 } 5171 SLAB_ATTR_RO(objects); 5172 5173 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5174 { 5175 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5176 } 5177 SLAB_ATTR_RO(objects_partial); 5178 5179 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5180 { 5181 int objects = 0; 5182 int pages = 0; 5183 int cpu; 5184 int len = 0; 5185 5186 for_each_online_cpu(cpu) { 5187 struct page *page; 5188 5189 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5190 5191 if (page) { 5192 pages += page->pages; 5193 objects += page->pobjects; 5194 } 5195 } 5196 5197 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages); 5198 5199 #ifdef CONFIG_SMP 5200 for_each_online_cpu(cpu) { 5201 struct page *page; 5202 5203 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5204 if (page) 5205 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5206 cpu, page->pobjects, page->pages); 5207 } 5208 #endif 5209 len += sysfs_emit_at(buf, len, "\n"); 5210 5211 return len; 5212 } 5213 SLAB_ATTR_RO(slabs_cpu_partial); 5214 5215 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5216 { 5217 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5218 } 5219 SLAB_ATTR_RO(reclaim_account); 5220 5221 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5222 { 5223 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5224 } 5225 SLAB_ATTR_RO(hwcache_align); 5226 5227 #ifdef CONFIG_ZONE_DMA 5228 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5229 { 5230 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5231 } 5232 SLAB_ATTR_RO(cache_dma); 5233 #endif 5234 5235 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5236 { 5237 return sysfs_emit(buf, "%u\n", s->usersize); 5238 } 5239 SLAB_ATTR_RO(usersize); 5240 5241 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5242 { 5243 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5244 } 5245 SLAB_ATTR_RO(destroy_by_rcu); 5246 5247 #ifdef CONFIG_SLUB_DEBUG 5248 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5249 { 5250 return show_slab_objects(s, buf, SO_ALL); 5251 } 5252 SLAB_ATTR_RO(slabs); 5253 5254 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5255 { 5256 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5257 } 5258 SLAB_ATTR_RO(total_objects); 5259 5260 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5261 { 5262 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5263 } 5264 SLAB_ATTR_RO(sanity_checks); 5265 5266 
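/*
 * The read-only attributes above and below (sanity_checks, trace,
 * red_zone, poison, store_user) merely report whether the corresponding
 * slub_debug flag is active for the cache; for instance, booting with
 * "slub_debug=FZP" one would expect /sys/kernel/slab/<cache>/red_zone to
 * read back as 1. (Hypothetical example; which caches are affected
 * depends on the slub_debug syntax used.)
 */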
static ssize_t trace_show(struct kmem_cache *s, char *buf) 5267 { 5268 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5269 } 5270 SLAB_ATTR_RO(trace); 5271 5272 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5273 { 5274 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5275 } 5276 5277 SLAB_ATTR_RO(red_zone); 5278 5279 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5280 { 5281 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5282 } 5283 5284 SLAB_ATTR_RO(poison); 5285 5286 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5287 { 5288 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5289 } 5290 5291 SLAB_ATTR_RO(store_user); 5292 5293 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5294 { 5295 return 0; 5296 } 5297 5298 static ssize_t validate_store(struct kmem_cache *s, 5299 const char *buf, size_t length) 5300 { 5301 int ret = -EINVAL; 5302 5303 if (buf[0] == '1') { 5304 ret = validate_slab_cache(s); 5305 if (ret >= 0) 5306 ret = length; 5307 } 5308 return ret; 5309 } 5310 SLAB_ATTR(validate); 5311 5312 #endif /* CONFIG_SLUB_DEBUG */ 5313 5314 #ifdef CONFIG_FAILSLAB 5315 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5316 { 5317 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5318 } 5319 SLAB_ATTR_RO(failslab); 5320 #endif 5321 5322 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5323 { 5324 return 0; 5325 } 5326 5327 static ssize_t shrink_store(struct kmem_cache *s, 5328 const char *buf, size_t length) 5329 { 5330 if (buf[0] == '1') 5331 kmem_cache_shrink(s); 5332 else 5333 return -EINVAL; 5334 return length; 5335 } 5336 SLAB_ATTR(shrink); 5337 5338 #ifdef CONFIG_NUMA 5339 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5340 { 5341 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5342 } 5343 5344 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5345 const char *buf, size_t length) 5346 { 5347 unsigned int ratio; 5348 int err; 5349 5350 err = kstrtouint(buf, 10, &ratio); 5351 if (err) 5352 return err; 5353 if (ratio > 100) 5354 return -ERANGE; 5355 5356 s->remote_node_defrag_ratio = ratio * 10; 5357 5358 return length; 5359 } 5360 SLAB_ATTR(remote_node_defrag_ratio); 5361 #endif 5362 5363 #ifdef CONFIG_SLUB_STATS 5364 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5365 { 5366 unsigned long sum = 0; 5367 int cpu; 5368 int len = 0; 5369 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5370 5371 if (!data) 5372 return -ENOMEM; 5373 5374 for_each_online_cpu(cpu) { 5375 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5376 5377 data[cpu] = x; 5378 sum += x; 5379 } 5380 5381 len += sysfs_emit_at(buf, len, "%lu", sum); 5382 5383 #ifdef CONFIG_SMP 5384 for_each_online_cpu(cpu) { 5385 if (data[cpu]) 5386 len += sysfs_emit_at(buf, len, " C%d=%u", 5387 cpu, data[cpu]); 5388 } 5389 #endif 5390 kfree(data); 5391 len += sysfs_emit_at(buf, len, "\n"); 5392 5393 return len; 5394 } 5395 5396 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5397 { 5398 int cpu; 5399 5400 for_each_online_cpu(cpu) 5401 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5402 } 5403 5404 #define STAT_ATTR(si, text) \ 5405 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5406 { \ 5407 return show_stat(s, buf, si); \ 5408 } \ 5409 static ssize_t text##_store(struct kmem_cache *s, \ 5410 const char *buf, size_t length) \ 5411 { \ 5412 if (buf[0] != '0') 
\ 5413 return -EINVAL; \ 5414 clear_stat(s, si); \ 5415 return length; \ 5416 } \ 5417 SLAB_ATTR(text); \ 5418 5419 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5420 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5421 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5422 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5423 STAT_ATTR(FREE_FROZEN, free_frozen); 5424 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5425 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5426 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5427 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5428 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5429 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5430 STAT_ATTR(FREE_SLAB, free_slab); 5431 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5432 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5433 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5434 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5435 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5436 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5437 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5438 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5439 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5440 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5441 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5442 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5443 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5444 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5445 #endif /* CONFIG_SLUB_STATS */ 5446 5447 static struct attribute *slab_attrs[] = { 5448 &slab_size_attr.attr, 5449 &object_size_attr.attr, 5450 &objs_per_slab_attr.attr, 5451 &order_attr.attr, 5452 &min_partial_attr.attr, 5453 &cpu_partial_attr.attr, 5454 &objects_attr.attr, 5455 &objects_partial_attr.attr, 5456 &partial_attr.attr, 5457 &cpu_slabs_attr.attr, 5458 &ctor_attr.attr, 5459 &aliases_attr.attr, 5460 &align_attr.attr, 5461 &hwcache_align_attr.attr, 5462 &reclaim_account_attr.attr, 5463 &destroy_by_rcu_attr.attr, 5464 &shrink_attr.attr, 5465 &slabs_cpu_partial_attr.attr, 5466 #ifdef CONFIG_SLUB_DEBUG 5467 &total_objects_attr.attr, 5468 &slabs_attr.attr, 5469 &sanity_checks_attr.attr, 5470 &trace_attr.attr, 5471 &red_zone_attr.attr, 5472 &poison_attr.attr, 5473 &store_user_attr.attr, 5474 &validate_attr.attr, 5475 #endif 5476 #ifdef CONFIG_ZONE_DMA 5477 &cache_dma_attr.attr, 5478 #endif 5479 #ifdef CONFIG_NUMA 5480 &remote_node_defrag_ratio_attr.attr, 5481 #endif 5482 #ifdef CONFIG_SLUB_STATS 5483 &alloc_fastpath_attr.attr, 5484 &alloc_slowpath_attr.attr, 5485 &free_fastpath_attr.attr, 5486 &free_slowpath_attr.attr, 5487 &free_frozen_attr.attr, 5488 &free_add_partial_attr.attr, 5489 &free_remove_partial_attr.attr, 5490 &alloc_from_partial_attr.attr, 5491 &alloc_slab_attr.attr, 5492 &alloc_refill_attr.attr, 5493 &alloc_node_mismatch_attr.attr, 5494 &free_slab_attr.attr, 5495 &cpuslab_flush_attr.attr, 5496 &deactivate_full_attr.attr, 5497 &deactivate_empty_attr.attr, 5498 &deactivate_to_head_attr.attr, 5499 &deactivate_to_tail_attr.attr, 5500 &deactivate_remote_frees_attr.attr, 5501 &deactivate_bypass_attr.attr, 5502 &order_fallback_attr.attr, 5503 &cmpxchg_double_fail_attr.attr, 5504 &cmpxchg_double_cpu_fail_attr.attr, 5505 &cpu_partial_alloc_attr.attr, 5506 &cpu_partial_free_attr.attr, 5507 &cpu_partial_node_attr.attr, 5508 &cpu_partial_drain_attr.attr, 5509 #endif 5510 #ifdef CONFIG_FAILSLAB 5511 &failslab_attr.attr, 5512 #endif 5513 &usersize_attr.attr, 5514 5515 NULL 5516 }; 5517 5518 static const struct attribute_group slab_attr_group = { 5519 .attrs 
= slab_attrs, 5520 }; 5521 5522 static ssize_t slab_attr_show(struct kobject *kobj, 5523 struct attribute *attr, 5524 char *buf) 5525 { 5526 struct slab_attribute *attribute; 5527 struct kmem_cache *s; 5528 int err; 5529 5530 attribute = to_slab_attr(attr); 5531 s = to_slab(kobj); 5532 5533 if (!attribute->show) 5534 return -EIO; 5535 5536 err = attribute->show(s, buf); 5537 5538 return err; 5539 } 5540 5541 static ssize_t slab_attr_store(struct kobject *kobj, 5542 struct attribute *attr, 5543 const char *buf, size_t len) 5544 { 5545 struct slab_attribute *attribute; 5546 struct kmem_cache *s; 5547 int err; 5548 5549 attribute = to_slab_attr(attr); 5550 s = to_slab(kobj); 5551 5552 if (!attribute->store) 5553 return -EIO; 5554 5555 err = attribute->store(s, buf, len); 5556 return err; 5557 } 5558 5559 static void kmem_cache_release(struct kobject *k) 5560 { 5561 slab_kmem_cache_release(to_slab(k)); 5562 } 5563 5564 static const struct sysfs_ops slab_sysfs_ops = { 5565 .show = slab_attr_show, 5566 .store = slab_attr_store, 5567 }; 5568 5569 static struct kobj_type slab_ktype = { 5570 .sysfs_ops = &slab_sysfs_ops, 5571 .release = kmem_cache_release, 5572 }; 5573 5574 static struct kset *slab_kset; 5575 5576 static inline struct kset *cache_kset(struct kmem_cache *s) 5577 { 5578 return slab_kset; 5579 } 5580 5581 #define ID_STR_LENGTH 64 5582 5583 /* Create a unique string id for a slab cache: 5584 * 5585 * Format :[flags-]size 5586 */ 5587 static char *create_unique_id(struct kmem_cache *s) 5588 { 5589 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5590 char *p = name; 5591 5592 BUG_ON(!name); 5593 5594 *p++ = ':'; 5595 /* 5596 * First flags affecting slabcache operations. We will only 5597 * get here for aliasable slabs so we do not need to support 5598 * too many flags. The flags here must cover all flags that 5599 * are matched during merging to guarantee that the id is 5600 * unique. 5601 */ 5602 if (s->flags & SLAB_CACHE_DMA) 5603 *p++ = 'd'; 5604 if (s->flags & SLAB_CACHE_DMA32) 5605 *p++ = 'D'; 5606 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5607 *p++ = 'a'; 5608 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5609 *p++ = 'F'; 5610 if (s->flags & SLAB_ACCOUNT) 5611 *p++ = 'A'; 5612 if (p != name + 1) 5613 *p++ = '-'; 5614 p += sprintf(p, "%07u", s->size); 5615 5616 BUG_ON(p > name + ID_STR_LENGTH - 1); 5617 return name; 5618 } 5619 5620 static int sysfs_slab_add(struct kmem_cache *s) 5621 { 5622 int err; 5623 const char *name; 5624 struct kset *kset = cache_kset(s); 5625 int unmergeable = slab_unmergeable(s); 5626 5627 if (!kset) { 5628 kobject_init(&s->kobj, &slab_ktype); 5629 return 0; 5630 } 5631 5632 if (!unmergeable && disable_higher_order_debug && 5633 (slub_debug & DEBUG_METADATA_FLAGS)) 5634 unmergeable = 1; 5635 5636 if (unmergeable) { 5637 /* 5638 * Slabcache can never be merged so we can use the name proper. 5639 * This is typically the case for debug situations. In that 5640 * case we can catch duplicate names easily. 5641 */ 5642 sysfs_remove_link(&slab_kset->kobj, s->name); 5643 name = s->name; 5644 } else { 5645 /* 5646 * Create a unique name for the slab as a target 5647 * for the symlinks. 
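 *
 * With create_unique_id(), a mergeable 192-byte cache flagged
 * SLAB_ACCOUNT would, for example, get a name along the lines of
 * ":A-0000192" (flag letters first, then the zero-padded object size).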
5648 */ 5649 name = create_unique_id(s); 5650 } 5651 5652 s->kobj.kset = kset; 5653 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5654 if (err) 5655 goto out; 5656 5657 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5658 if (err) 5659 goto out_del_kobj; 5660 5661 if (!unmergeable) { 5662 /* Setup first alias */ 5663 sysfs_slab_alias(s, s->name); 5664 } 5665 out: 5666 if (!unmergeable) 5667 kfree(name); 5668 return err; 5669 out_del_kobj: 5670 kobject_del(&s->kobj); 5671 goto out; 5672 } 5673 5674 void sysfs_slab_unlink(struct kmem_cache *s) 5675 { 5676 if (slab_state >= FULL) 5677 kobject_del(&s->kobj); 5678 } 5679 5680 void sysfs_slab_release(struct kmem_cache *s) 5681 { 5682 if (slab_state >= FULL) 5683 kobject_put(&s->kobj); 5684 } 5685 5686 /* 5687 * Need to buffer aliases during bootup until sysfs becomes 5688 * available lest we lose that information. 5689 */ 5690 struct saved_alias { 5691 struct kmem_cache *s; 5692 const char *name; 5693 struct saved_alias *next; 5694 }; 5695 5696 static struct saved_alias *alias_list; 5697 5698 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5699 { 5700 struct saved_alias *al; 5701 5702 if (slab_state == FULL) { 5703 /* 5704 * If we have a leftover link then remove it. 5705 */ 5706 sysfs_remove_link(&slab_kset->kobj, name); 5707 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5708 } 5709 5710 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5711 if (!al) 5712 return -ENOMEM; 5713 5714 al->s = s; 5715 al->name = name; 5716 al->next = alias_list; 5717 alias_list = al; 5718 return 0; 5719 } 5720 5721 static int __init slab_sysfs_init(void) 5722 { 5723 struct kmem_cache *s; 5724 int err; 5725 5726 mutex_lock(&slab_mutex); 5727 5728 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5729 if (!slab_kset) { 5730 mutex_unlock(&slab_mutex); 5731 pr_err("Cannot register slab subsystem.\n"); 5732 return -ENOSYS; 5733 } 5734 5735 slab_state = FULL; 5736 5737 list_for_each_entry(s, &slab_caches, list) { 5738 err = sysfs_slab_add(s); 5739 if (err) 5740 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5741 s->name); 5742 } 5743 5744 while (alias_list) { 5745 struct saved_alias *al = alias_list; 5746 5747 alias_list = alias_list->next; 5748 err = sysfs_slab_alias(al->s, al->name); 5749 if (err) 5750 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5751 al->name); 5752 kfree(al); 5753 } 5754 5755 mutex_unlock(&slab_mutex); 5756 return 0; 5757 } 5758 5759 __initcall(slab_sysfs_init); 5760 #endif /* CONFIG_SYSFS */ 5761 5762 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 5763 static int slab_debugfs_show(struct seq_file *seq, void *v) 5764 { 5765 5766 struct location *l; 5767 unsigned int idx = *(unsigned int *)v; 5768 struct loc_track *t = seq->private; 5769 5770 if (idx < t->count) { 5771 l = &t->loc[idx]; 5772 5773 seq_printf(seq, "%7ld ", l->count); 5774 5775 if (l->addr) 5776 seq_printf(seq, "%pS", (void *)l->addr); 5777 else 5778 seq_puts(seq, "<not-available>"); 5779 5780 if (l->sum_time != l->min_time) { 5781 seq_printf(seq, " age=%ld/%llu/%ld", 5782 l->min_time, div_u64(l->sum_time, l->count), 5783 l->max_time); 5784 } else 5785 seq_printf(seq, " age=%ld", l->min_time); 5786 5787 if (l->min_pid != l->max_pid) 5788 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 5789 else 5790 seq_printf(seq, " pid=%ld", 5791 l->min_pid); 5792 5793 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 5794 seq_printf(seq, " cpus=%*pbl", 5795 
cpumask_pr_args(to_cpumask(l->cpus))); 5796 5797 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 5798 seq_printf(seq, " nodes=%*pbl", 5799 nodemask_pr_args(&l->nodes)); 5800 5801 seq_puts(seq, "\n"); 5802 } 5803 5804 if (!idx && !t->count) 5805 seq_puts(seq, "No data\n"); 5806 5807 return 0; 5808 } 5809 5810 static void slab_debugfs_stop(struct seq_file *seq, void *v) 5811 { 5812 } 5813 5814 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 5815 { 5816 struct loc_track *t = seq->private; 5817 5818 v = ppos; 5819 ++*ppos; 5820 if (*ppos <= t->count) 5821 return v; 5822 5823 return NULL; 5824 } 5825 5826 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 5827 { 5828 return ppos; 5829 } 5830 5831 static const struct seq_operations slab_debugfs_sops = { 5832 .start = slab_debugfs_start, 5833 .next = slab_debugfs_next, 5834 .stop = slab_debugfs_stop, 5835 .show = slab_debugfs_show, 5836 }; 5837 5838 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 5839 { 5840 5841 struct kmem_cache_node *n; 5842 enum track_item alloc; 5843 int node; 5844 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 5845 sizeof(struct loc_track)); 5846 struct kmem_cache *s = file_inode(filep)->i_private; 5847 5848 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 5849 alloc = TRACK_ALLOC; 5850 else 5851 alloc = TRACK_FREE; 5852 5853 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) 5854 return -ENOMEM; 5855 5856 /* Push back cpu slabs */ 5857 flush_all(s); 5858 5859 for_each_kmem_cache_node(s, node, n) { 5860 unsigned long flags; 5861 struct page *page; 5862 5863 if (!atomic_long_read(&n->nr_slabs)) 5864 continue; 5865 5866 spin_lock_irqsave(&n->list_lock, flags); 5867 list_for_each_entry(page, &n->partial, slab_list) 5868 process_slab(t, s, page, alloc); 5869 list_for_each_entry(page, &n->full, slab_list) 5870 process_slab(t, s, page, alloc); 5871 spin_unlock_irqrestore(&n->list_lock, flags); 5872 } 5873 5874 return 0; 5875 } 5876 5877 static int slab_debug_trace_release(struct inode *inode, struct file *file) 5878 { 5879 struct seq_file *seq = file->private_data; 5880 struct loc_track *t = seq->private; 5881 5882 free_loc_track(t); 5883 return seq_release_private(inode, file); 5884 } 5885 5886 static const struct file_operations slab_debugfs_fops = { 5887 .open = slab_debug_trace_open, 5888 .read = seq_read, 5889 .llseek = seq_lseek, 5890 .release = slab_debug_trace_release, 5891 }; 5892 5893 static void debugfs_slab_add(struct kmem_cache *s) 5894 { 5895 struct dentry *slab_cache_dir; 5896 5897 if (unlikely(!slab_debugfs_root)) 5898 return; 5899 5900 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 5901 5902 debugfs_create_file("alloc_traces", 0400, 5903 slab_cache_dir, s, &slab_debugfs_fops); 5904 5905 debugfs_create_file("free_traces", 0400, 5906 slab_cache_dir, s, &slab_debugfs_fops); 5907 } 5908 5909 void debugfs_slab_release(struct kmem_cache *s) 5910 { 5911 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); 5912 } 5913 5914 static int __init slab_debugfs_init(void) 5915 { 5916 struct kmem_cache *s; 5917 5918 slab_debugfs_root = debugfs_create_dir("slab", NULL); 5919 5920 list_for_each_entry(s, &slab_caches, list) 5921 if (s->flags & SLAB_STORE_USER) 5922 debugfs_slab_add(s); 5923 5924 return 0; 5925 5926 } 5927 __initcall(slab_debugfs_init); 5928 #endif 5929 /* 5930 * The /proc/slabinfo ABI 5931 */ 5932 #ifdef CONFIG_SLUB_DEBUG 5933 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 5934 { 5935 unsigned long nr_slabs = 0; 5936 unsigned long nr_objs = 0; 5937 unsigned long nr_free = 0; 5938 int node; 5939 struct kmem_cache_node *n; 5940 5941 for_each_kmem_cache_node(s, node, n) { 5942 nr_slabs += node_nr_slabs(n); 5943 nr_objs += node_nr_objs(n); 5944 nr_free += count_partial(n, count_free); 5945 } 5946 5947 sinfo->active_objs = nr_objs - nr_free; 5948 sinfo->num_objs = nr_objs; 5949 sinfo->active_slabs = nr_slabs; 5950 sinfo->num_slabs = nr_slabs; 5951 sinfo->objects_per_slab = oo_objects(s->oo); 5952 sinfo->cache_order = oo_order(s->oo); 5953 } 5954 5955 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 5956 { 5957 } 5958 5959 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 5960 size_t count, loff_t *ppos) 5961 { 5962 return -EIO; 5963 } 5964 #endif /* CONFIG_SLUB_DEBUG */ 5965
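/*
 * A quick sketch of how the numbers above feed /proc/slabinfo (values are
 * illustrative): if a cache has 10 slabs of 32 objects each and
 * count_partial() finds 25 free objects on the partial lists, then
 * num_objs = 320, active_objs = 320 - 25 = 295, and both active_slabs and
 * num_slabs are reported as 10, since SLUB does not track a separate
 * active slab count.
 */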