// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except the per cpu partial list. The processor that froze the
 *   slab is the one who can perform list operations on the page. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor can the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * page->frozen		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG

#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif

static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}

#else /* CONFIG_SLUB_DEBUG */

static inline bool __slub_debug_enabled(void)
{
	return false;
}

#endif /* CONFIG_SLUB_DEBUG */

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}
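
/*
 * Illustrative sketch (editor's note, not a new invariant) of one slot when
 * SLAB_RED_ZONE is set:
 *
 *   slot base                            object pointer seen by callers
 *   |<-- red_left_pad -->|<-- object_size -->|<-- right redzone / metadata -->|
 *
 * fixup_red_left() above converts the slot base into the object pointer that
 * callers see; restore_red_left() further down performs the inverse so the
 * debug checks can find the start of the slot again.
 */
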
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}
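
/*
 * Illustrative note on the hardening above: the stored value is
 *
 *	stored = ptr ^ s->random ^ swab(ptr_addr)
 *
 * and applying the same XOR with the same ptr_addr recovers ptr, which is
 * why set_freepointer() and get_freepointer() below can share this one
 * helper for both encoding and decoding.
 */
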
/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
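
/*
 * Worked example of the encoding above (illustrative, assuming 4 KiB pages):
 * for order = 1 and size = 256, order_objects(1, 256) = 8192 / 256 = 32, so
 * oo_make(1, 256) stores (1 << OO_SHIFT) + 32; oo_order() and oo_objects()
 * simply unpack the two halves again.
 */
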
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (likely(!current->kunit_test))
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}
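
/*
 * Illustrative layout note: when SLAB_STORE_USER is set, two struct track
 * records (TRACK_ALLOC followed by TRACK_FREE) sit immediately after the
 * info block whose end get_info_end() returns; get_track() below simply
 * indexes into that pair, and print_trailer() accounts for them when
 * computing the start of the trailing padding.
 */
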
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		metadata_access_enable();
		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
					      TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_page_info(struct page *page)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
	       page, page->objects, page->inuse, page->freelist,
	       page->flags, &page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, page, nextfree) && freelist) {
		object_err(s, page, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = page_size(page);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "End Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page, void *object)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

/*
 * Parse a block of slub_debug options. Blocks are delimited by ';'
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			slub_debug = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only enabled for those slabs, so the global
	 * slub_debug should be 0. We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			slub_debug = 0;
		slub_debug_string = saved_str;
	}
out:
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slub_debug", setup_slub_debug);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @object_size:	the size of an object without meta data
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
static inline
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif /* CONFIG_SLUB_DEBUG */

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	return ptr;
}

static __always_inline void kfree_hook(void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x);
}

static __always_inline bool slab_free_hook(struct kmem_cache *s,
						void *x, bool init)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast path
	 * So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#ifdef CONFIG_LOCKDEP
	{
		unsigned long flags;

		local_irq_save(flags);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_free and initialization memset's must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * The initialization memset's clear the object and the metadata,
	 * but don't touch the SLAB redzone.
	 */
	if (init) {
		int rsize;

		if (!kasan_has_integrated_init())
			memset(kasan_reset_tag(x), 0, s->object_size);
		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
		       s->size - s->inuse - rsize);
	}
	/* KASAN might put x into memory quarantine, delaying its reuse. */
	return kasan_slab_free(s, x, init);
}

static inline bool slab_free_freelist_hook(struct kmem_cache *s,
						void **head, void **tail)
{

	void *object;
	void *next = *head;
	void *old_tail = *tail ?
			 *tail : *head;

	if (is_kfence_address(next)) {
		slab_free_hook(s, next, false);
		return true;
	}

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		next = get_freepointer(s, object);

		/* If object's reuse doesn't have to be delayed */
		if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		}
	} while (object != old_tail);

	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}

static void *setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
	return object;
}

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(struct kmem_cache *s,
		gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
	struct page *page;
	unsigned int order = oo_order(oo);

	if (node == NUMA_NO_NODE)
		page = alloc_pages(flags, order);
	else
		page = __alloc_pages_node(node, flags, order);

	return page;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		unsigned int i;

		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the singly linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	cur = setup_object(s, page, cur);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, page, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	page->objects = oo_objects(oo);

	account_slab_page(page, oo_order(oo), s, flags);

	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	kasan_poison_slab(page);

	start = page_address(page);

	setup_page_debug(s, page, start);

	shuffle = shuffle_freelist(s, page);

	if (!shuffle) {
		start = fixup_red_left(s, start);
		start = setup_object(s, page, start);
		page->freelist = start;
		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
			next = p + s->size;
			next = setup_object(s, page, next);
			set_freepointer(s, p, next);
			p = next;
		}
		set_freepointer(s, p, NULL);
	}

	page->inuse = page->objects;
	page->frozen = 1;

out:
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	return allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, SLUB_RED_INACTIVE);
	}

	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	/* In union with page->mapping where page allocator expects NULL */
	page->slab_cache = NULL;
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	unaccount_slab_page(page, order, s);
	__free_pages(page, order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page = container_of(h, struct page, rcu_head);

	__free_slab(page->slab_cache, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
		call_rcu(&page->rcu_head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}

/*
 * Management of partially allocated slabs.
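 *
 * (Illustrative note: a full slab whose object gets freed re-enters its
 *  node's partial list via add_partial() below, and acquire_slab() later
 *  removes and freezes it again when a cpu needs a fresh allocation slab.)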
1956 */ 1957 static inline void 1958 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) 1959 { 1960 n->nr_partial++; 1961 if (tail == DEACTIVATE_TO_TAIL) 1962 list_add_tail(&page->slab_list, &n->partial); 1963 else 1964 list_add(&page->slab_list, &n->partial); 1965 } 1966 1967 static inline void add_partial(struct kmem_cache_node *n, 1968 struct page *page, int tail) 1969 { 1970 lockdep_assert_held(&n->list_lock); 1971 __add_partial(n, page, tail); 1972 } 1973 1974 static inline void remove_partial(struct kmem_cache_node *n, 1975 struct page *page) 1976 { 1977 lockdep_assert_held(&n->list_lock); 1978 list_del(&page->slab_list); 1979 n->nr_partial--; 1980 } 1981 1982 /* 1983 * Remove slab from the partial list, freeze it and 1984 * return the pointer to the freelist. 1985 * 1986 * Returns a list of objects or NULL if it fails. 1987 */ 1988 static inline void *acquire_slab(struct kmem_cache *s, 1989 struct kmem_cache_node *n, struct page *page, 1990 int mode, int *objects) 1991 { 1992 void *freelist; 1993 unsigned long counters; 1994 struct page new; 1995 1996 lockdep_assert_held(&n->list_lock); 1997 1998 /* 1999 * Zap the freelist and set the frozen bit. 2000 * The old freelist is the list of objects for the 2001 * per cpu allocation list. 2002 */ 2003 freelist = page->freelist; 2004 counters = page->counters; 2005 new.counters = counters; 2006 *objects = new.objects - new.inuse; 2007 if (mode) { 2008 new.inuse = page->objects; 2009 new.freelist = NULL; 2010 } else { 2011 new.freelist = freelist; 2012 } 2013 2014 VM_BUG_ON(new.frozen); 2015 new.frozen = 1; 2016 2017 if (!__cmpxchg_double_slab(s, page, 2018 freelist, counters, 2019 new.freelist, new.counters, 2020 "acquire_slab")) 2021 return NULL; 2022 2023 remove_partial(n, page); 2024 WARN_ON(!freelist); 2025 return freelist; 2026 } 2027 2028 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 2029 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); 2030 2031 /* 2032 * Try to allocate a partial slab from a specific node. 2033 */ 2034 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 2035 struct kmem_cache_cpu *c, gfp_t flags) 2036 { 2037 struct page *page, *page2; 2038 void *object = NULL; 2039 unsigned int available = 0; 2040 int objects; 2041 2042 /* 2043 * Racy check. If we mistakenly see no partial slabs then we 2044 * just allocate an empty slab. If we mistakenly try to get a 2045 * partial slab and there is none available then get_partial() 2046 * will return NULL. 2047 */ 2048 if (!n || !n->nr_partial) 2049 return NULL; 2050 2051 spin_lock(&n->list_lock); 2052 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { 2053 void *t; 2054 2055 if (!pfmemalloc_match(page, flags)) 2056 continue; 2057 2058 t = acquire_slab(s, n, page, object == NULL, &objects); 2059 if (!t) 2060 break; 2061 2062 available += objects; 2063 if (!object) { 2064 c->page = page; 2065 stat(s, ALLOC_FROM_PARTIAL); 2066 object = t; 2067 } else { 2068 put_cpu_partial(s, page, 0); 2069 stat(s, CPU_PARTIAL_NODE); 2070 } 2071 if (!kmem_cache_has_cpu_partial(s) 2072 || available > slub_cpu_partial(s) / 2) 2073 break; 2074 2075 } 2076 spin_unlock(&n->list_lock); 2077 return object; 2078 } 2079 2080 /* 2081 * Get a page from somewhere. Search in increasing NUMA distances. 
2082 */ 2083 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2084 struct kmem_cache_cpu *c) 2085 { 2086 #ifdef CONFIG_NUMA 2087 struct zonelist *zonelist; 2088 struct zoneref *z; 2089 struct zone *zone; 2090 enum zone_type highest_zoneidx = gfp_zone(flags); 2091 void *object; 2092 unsigned int cpuset_mems_cookie; 2093 2094 /* 2095 * The defrag ratio allows a configuration of the tradeoffs between 2096 * inter node defragmentation and node local allocations. A lower 2097 * defrag_ratio increases the tendency to do local allocations 2098 * instead of attempting to obtain partial slabs from other nodes. 2099 * 2100 * If the defrag_ratio is set to 0 then kmalloc() always 2101 * returns node local objects. If the ratio is higher then kmalloc() 2102 * may return off node objects because partial slabs are obtained 2103 * from other nodes and filled up. 2104 * 2105 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2106 * (which makes defrag_ratio = 1000) then every (well almost) 2107 * allocation will first attempt to defrag slab caches on other nodes. 2108 * This means scanning over all nodes to look for partial slabs which 2109 * may be expensive if we do it every time we are trying to find a slab 2110 * with available objects. 2111 */ 2112 if (!s->remote_node_defrag_ratio || 2113 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2114 return NULL; 2115 2116 do { 2117 cpuset_mems_cookie = read_mems_allowed_begin(); 2118 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2119 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2120 struct kmem_cache_node *n; 2121 2122 n = get_node(s, zone_to_nid(zone)); 2123 2124 if (n && cpuset_zone_allowed(zone, flags) && 2125 n->nr_partial > s->min_partial) { 2126 object = get_partial_node(s, n, c, flags); 2127 if (object) { 2128 /* 2129 * Don't check read_mems_allowed_retry() 2130 * here - if mems_allowed was updated in 2131 * parallel, that was a harmless race 2132 * between allocation and the cpuset 2133 * update 2134 */ 2135 return object; 2136 } 2137 } 2138 } 2139 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2140 #endif /* CONFIG_NUMA */ 2141 return NULL; 2142 } 2143 2144 /* 2145 * Get a partial page, lock it and return it. 2146 */ 2147 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2148 struct kmem_cache_cpu *c) 2149 { 2150 void *object; 2151 int searchnode = node; 2152 2153 if (node == NUMA_NO_NODE) 2154 searchnode = numa_mem_id(); 2155 2156 object = get_partial_node(s, get_node(s, searchnode), c, flags); 2157 if (object || node != NUMA_NO_NODE) 2158 return object; 2159 2160 return get_any_partial(s, flags, c); 2161 } 2162 2163 #ifdef CONFIG_PREEMPTION 2164 /* 2165 * Calculate the next globally unique transaction for disambiguation 2166 * during cmpxchg. The transactions start with the cpu number and are then 2167 * incremented by CONFIG_NR_CPUS. 2168 */ 2169 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2170 #else 2171 /* 2172 * No preemption supported therefore also no need to check for 2173 * different cpus. 
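 *
 * The tid is then simply incremented by one per operation, which is
 * still enough for the fastpath cmpxchg to notice that e.g. an
 * interrupt handler allocated or freed on this cpu in the meantime.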
2174 */ 2175 #define TID_STEP 1 2176 #endif 2177 2178 static inline unsigned long next_tid(unsigned long tid) 2179 { 2180 return tid + TID_STEP; 2181 } 2182 2183 #ifdef SLUB_DEBUG_CMPXCHG 2184 static inline unsigned int tid_to_cpu(unsigned long tid) 2185 { 2186 return tid % TID_STEP; 2187 } 2188 2189 static inline unsigned long tid_to_event(unsigned long tid) 2190 { 2191 return tid / TID_STEP; 2192 } 2193 #endif 2194 2195 static inline unsigned int init_tid(int cpu) 2196 { 2197 return cpu; 2198 } 2199 2200 static inline void note_cmpxchg_failure(const char *n, 2201 const struct kmem_cache *s, unsigned long tid) 2202 { 2203 #ifdef SLUB_DEBUG_CMPXCHG 2204 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2205 2206 pr_info("%s %s: cmpxchg redo ", n, s->name); 2207 2208 #ifdef CONFIG_PREEMPTION 2209 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2210 pr_warn("due to cpu change %d -> %d\n", 2211 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2212 else 2213 #endif 2214 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2215 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2216 tid_to_event(tid), tid_to_event(actual_tid)); 2217 else 2218 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2219 actual_tid, tid, next_tid(tid)); 2220 #endif 2221 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2222 } 2223 2224 static void init_kmem_cache_cpus(struct kmem_cache *s) 2225 { 2226 int cpu; 2227 2228 for_each_possible_cpu(cpu) 2229 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 2230 } 2231 2232 /* 2233 * Remove the cpu slab 2234 */ 2235 static void deactivate_slab(struct kmem_cache *s, struct page *page, 2236 void *freelist, struct kmem_cache_cpu *c) 2237 { 2238 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 2239 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 2240 int lock = 0, free_delta = 0; 2241 enum slab_modes l = M_NONE, m = M_NONE; 2242 void *nextfree, *freelist_iter, *freelist_tail; 2243 int tail = DEACTIVATE_TO_HEAD; 2244 struct page new; 2245 struct page old; 2246 2247 if (page->freelist) { 2248 stat(s, DEACTIVATE_REMOTE_FREES); 2249 tail = DEACTIVATE_TO_TAIL; 2250 } 2251 2252 /* 2253 * Stage one: Count the objects on cpu's freelist as free_delta and 2254 * remember the last object in freelist_tail for later splicing. 2255 */ 2256 freelist_tail = NULL; 2257 freelist_iter = freelist; 2258 while (freelist_iter) { 2259 nextfree = get_freepointer(s, freelist_iter); 2260 2261 /* 2262 * If 'nextfree' is invalid, it is possible that the object at 2263 * 'freelist_iter' is already corrupted. So isolate all objects 2264 * starting at 'freelist_iter' by skipping them. 2265 */ 2266 if (freelist_corrupted(s, page, &freelist_iter, nextfree)) 2267 break; 2268 2269 freelist_tail = freelist_iter; 2270 free_delta++; 2271 2272 freelist_iter = nextfree; 2273 } 2274 2275 /* 2276 * Stage two: Unfreeze the page while splicing the per-cpu 2277 * freelist to the head of page's freelist. 2278 * 2279 * Ensure that the page is unfrozen while the list presence 2280 * reflects the actual number of objects during unfreeze. 2281 * 2282 * We setup the list membership and then perform a cmpxchg 2283 * with the count. If there is a mismatch then the page 2284 * is not unfrozen but the page is on the wrong list. 2285 * 2286 * Then we restart the process which may have to remove 2287 * the page from the list that we just put it on again 2288 * because the number of objects in the slab may have 2289 * changed. 
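	 *
	 * In other words, the list placement below is decided from a
	 * snapshot (old.counters); if the cmpxchg fails because the slab
	 * changed under us, we jump back to redo and may first have to
	 * undo the list move we just made.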
2290 */ 2291 redo: 2292 2293 old.freelist = READ_ONCE(page->freelist); 2294 old.counters = READ_ONCE(page->counters); 2295 VM_BUG_ON(!old.frozen); 2296 2297 /* Determine target state of the slab */ 2298 new.counters = old.counters; 2299 if (freelist_tail) { 2300 new.inuse -= free_delta; 2301 set_freepointer(s, freelist_tail, old.freelist); 2302 new.freelist = freelist; 2303 } else 2304 new.freelist = old.freelist; 2305 2306 new.frozen = 0; 2307 2308 if (!new.inuse && n->nr_partial >= s->min_partial) 2309 m = M_FREE; 2310 else if (new.freelist) { 2311 m = M_PARTIAL; 2312 if (!lock) { 2313 lock = 1; 2314 /* 2315 * Taking the spinlock removes the possibility 2316 * that acquire_slab() will see a slab page that 2317 * is frozen 2318 */ 2319 spin_lock(&n->list_lock); 2320 } 2321 } else { 2322 m = M_FULL; 2323 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) { 2324 lock = 1; 2325 /* 2326 * This also ensures that the scanning of full 2327 * slabs from diagnostic functions will not see 2328 * any frozen slabs. 2329 */ 2330 spin_lock(&n->list_lock); 2331 } 2332 } 2333 2334 if (l != m) { 2335 if (l == M_PARTIAL) 2336 remove_partial(n, page); 2337 else if (l == M_FULL) 2338 remove_full(s, n, page); 2339 2340 if (m == M_PARTIAL) 2341 add_partial(n, page, tail); 2342 else if (m == M_FULL) 2343 add_full(s, n, page); 2344 } 2345 2346 l = m; 2347 if (!__cmpxchg_double_slab(s, page, 2348 old.freelist, old.counters, 2349 new.freelist, new.counters, 2350 "unfreezing slab")) 2351 goto redo; 2352 2353 if (lock) 2354 spin_unlock(&n->list_lock); 2355 2356 if (m == M_PARTIAL) 2357 stat(s, tail); 2358 else if (m == M_FULL) 2359 stat(s, DEACTIVATE_FULL); 2360 else if (m == M_FREE) { 2361 stat(s, DEACTIVATE_EMPTY); 2362 discard_slab(s, page); 2363 stat(s, FREE_SLAB); 2364 } 2365 2366 c->page = NULL; 2367 c->freelist = NULL; 2368 } 2369 2370 /* 2371 * Unfreeze all the cpu partial slabs. 2372 * 2373 * This function must be called with interrupts disabled 2374 * for the cpu using c (or some other guarantee must be there 2375 * to guarantee no concurrent accesses). 
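 *
 * Currently both callers arrange for that: put_cpu_partial() wraps the
 * call in local_irq_save()/local_irq_restore(), and __flush_cpu_slab()
 * runs either from the flush IPI or with interrupts disabled by its
 * caller.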
2376 */ 2377 static void unfreeze_partials(struct kmem_cache *s, 2378 struct kmem_cache_cpu *c) 2379 { 2380 #ifdef CONFIG_SLUB_CPU_PARTIAL 2381 struct kmem_cache_node *n = NULL, *n2 = NULL; 2382 struct page *page, *discard_page = NULL; 2383 2384 while ((page = slub_percpu_partial(c))) { 2385 struct page new; 2386 struct page old; 2387 2388 slub_set_percpu_partial(c, page); 2389 2390 n2 = get_node(s, page_to_nid(page)); 2391 if (n != n2) { 2392 if (n) 2393 spin_unlock(&n->list_lock); 2394 2395 n = n2; 2396 spin_lock(&n->list_lock); 2397 } 2398 2399 do { 2400 2401 old.freelist = page->freelist; 2402 old.counters = page->counters; 2403 VM_BUG_ON(!old.frozen); 2404 2405 new.counters = old.counters; 2406 new.freelist = old.freelist; 2407 2408 new.frozen = 0; 2409 2410 } while (!__cmpxchg_double_slab(s, page, 2411 old.freelist, old.counters, 2412 new.freelist, new.counters, 2413 "unfreezing slab")); 2414 2415 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2416 page->next = discard_page; 2417 discard_page = page; 2418 } else { 2419 add_partial(n, page, DEACTIVATE_TO_TAIL); 2420 stat(s, FREE_ADD_PARTIAL); 2421 } 2422 } 2423 2424 if (n) 2425 spin_unlock(&n->list_lock); 2426 2427 while (discard_page) { 2428 page = discard_page; 2429 discard_page = discard_page->next; 2430 2431 stat(s, DEACTIVATE_EMPTY); 2432 discard_slab(s, page); 2433 stat(s, FREE_SLAB); 2434 } 2435 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2436 } 2437 2438 /* 2439 * Put a page that was just frozen (in __slab_free|get_partial_node) into a 2440 * partial page slot if available. 2441 * 2442 * If we did not find a slot then simply move all the partials to the 2443 * per node partial list. 2444 */ 2445 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2446 { 2447 #ifdef CONFIG_SLUB_CPU_PARTIAL 2448 struct page *oldpage; 2449 int pages; 2450 int pobjects; 2451 2452 preempt_disable(); 2453 do { 2454 pages = 0; 2455 pobjects = 0; 2456 oldpage = this_cpu_read(s->cpu_slab->partial); 2457 2458 if (oldpage) { 2459 pobjects = oldpage->pobjects; 2460 pages = oldpage->pages; 2461 if (drain && pobjects > slub_cpu_partial(s)) { 2462 unsigned long flags; 2463 /* 2464 * partial array is full. Move the existing 2465 * set to the per node partial list. 2466 */ 2467 local_irq_save(flags); 2468 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2469 local_irq_restore(flags); 2470 oldpage = NULL; 2471 pobjects = 0; 2472 pages = 0; 2473 stat(s, CPU_PARTIAL_DRAIN); 2474 } 2475 } 2476 2477 pages++; 2478 pobjects += page->objects - page->inuse; 2479 2480 page->pages = pages; 2481 page->pobjects = pobjects; 2482 page->next = oldpage; 2483 2484 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) 2485 != oldpage); 2486 if (unlikely(!slub_cpu_partial(s))) { 2487 unsigned long flags; 2488 2489 local_irq_save(flags); 2490 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2491 local_irq_restore(flags); 2492 } 2493 preempt_enable(); 2494 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2495 } 2496 2497 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2498 { 2499 stat(s, CPUSLAB_FLUSH); 2500 deactivate_slab(s, c->page, c->freelist, c); 2501 2502 c->tid = next_tid(c->tid); 2503 } 2504 2505 /* 2506 * Flush cpu slab. 2507 * 2508 * Called from IPI handler with interrupts disabled. 
2509 */ 2510 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2511 { 2512 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2513 2514 if (c->page) 2515 flush_slab(s, c); 2516 2517 unfreeze_partials(s, c); 2518 } 2519 2520 static void flush_cpu_slab(void *d) 2521 { 2522 struct kmem_cache *s = d; 2523 2524 __flush_cpu_slab(s, smp_processor_id()); 2525 } 2526 2527 static bool has_cpu_slab(int cpu, void *info) 2528 { 2529 struct kmem_cache *s = info; 2530 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2531 2532 return c->page || slub_percpu_partial(c); 2533 } 2534 2535 static void flush_all(struct kmem_cache *s) 2536 { 2537 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1); 2538 } 2539 2540 /* 2541 * Use the cpu notifier to insure that the cpu slabs are flushed when 2542 * necessary. 2543 */ 2544 static int slub_cpu_dead(unsigned int cpu) 2545 { 2546 struct kmem_cache *s; 2547 unsigned long flags; 2548 2549 mutex_lock(&slab_mutex); 2550 list_for_each_entry(s, &slab_caches, list) { 2551 local_irq_save(flags); 2552 __flush_cpu_slab(s, cpu); 2553 local_irq_restore(flags); 2554 } 2555 mutex_unlock(&slab_mutex); 2556 return 0; 2557 } 2558 2559 /* 2560 * Check if the objects in a per cpu structure fit numa 2561 * locality expectations. 2562 */ 2563 static inline int node_match(struct page *page, int node) 2564 { 2565 #ifdef CONFIG_NUMA 2566 if (node != NUMA_NO_NODE && page_to_nid(page) != node) 2567 return 0; 2568 #endif 2569 return 1; 2570 } 2571 2572 #ifdef CONFIG_SLUB_DEBUG 2573 static int count_free(struct page *page) 2574 { 2575 return page->objects - page->inuse; 2576 } 2577 2578 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2579 { 2580 return atomic_long_read(&n->total_objects); 2581 } 2582 #endif /* CONFIG_SLUB_DEBUG */ 2583 2584 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2585 static unsigned long count_partial(struct kmem_cache_node *n, 2586 int (*get_count)(struct page *)) 2587 { 2588 unsigned long flags; 2589 unsigned long x = 0; 2590 struct page *page; 2591 2592 spin_lock_irqsave(&n->list_lock, flags); 2593 list_for_each_entry(page, &n->partial, slab_list) 2594 x += get_count(page); 2595 spin_unlock_irqrestore(&n->list_lock, flags); 2596 return x; 2597 } 2598 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2599 2600 static noinline void 2601 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2602 { 2603 #ifdef CONFIG_SLUB_DEBUG 2604 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2605 DEFAULT_RATELIMIT_BURST); 2606 int node; 2607 struct kmem_cache_node *n; 2608 2609 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2610 return; 2611 2612 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2613 nid, gfpflags, &gfpflags); 2614 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2615 s->name, s->object_size, s->size, oo_order(s->oo), 2616 oo_order(s->min)); 2617 2618 if (oo_order(s->min) > get_order(s->object_size)) 2619 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2620 s->name); 2621 2622 for_each_kmem_cache_node(s, node, n) { 2623 unsigned long nr_slabs; 2624 unsigned long nr_objs; 2625 unsigned long nr_free; 2626 2627 nr_free = count_partial(n, count_free); 2628 nr_slabs = node_nr_slabs(n); 2629 nr_objs = node_nr_objs(n); 2630 2631 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2632 node, nr_slabs, nr_objs, nr_free); 2633 } 2634 #endif 2635 } 2636 2637 static inline 
void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2638 int node, struct kmem_cache_cpu **pc) 2639 { 2640 void *freelist; 2641 struct kmem_cache_cpu *c = *pc; 2642 struct page *page; 2643 2644 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2645 2646 freelist = get_partial(s, flags, node, c); 2647 2648 if (freelist) 2649 return freelist; 2650 2651 page = new_slab(s, flags, node); 2652 if (page) { 2653 c = raw_cpu_ptr(s->cpu_slab); 2654 if (c->page) 2655 flush_slab(s, c); 2656 2657 /* 2658 * No other reference to the page yet so we can 2659 * muck around with it freely without cmpxchg 2660 */ 2661 freelist = page->freelist; 2662 page->freelist = NULL; 2663 2664 stat(s, ALLOC_SLAB); 2665 c->page = page; 2666 *pc = c; 2667 } 2668 2669 return freelist; 2670 } 2671 2672 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2673 { 2674 if (unlikely(PageSlabPfmemalloc(page))) 2675 return gfp_pfmemalloc_allowed(gfpflags); 2676 2677 return true; 2678 } 2679 2680 /* 2681 * Check the page->freelist of a page and either transfer the freelist to the 2682 * per cpu freelist or deactivate the page. 2683 * 2684 * The page is still frozen if the return value is not NULL. 2685 * 2686 * If this function returns NULL then the page has been unfrozen. 2687 * 2688 * This function must be called with interrupt disabled. 2689 */ 2690 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2691 { 2692 struct page new; 2693 unsigned long counters; 2694 void *freelist; 2695 2696 do { 2697 freelist = page->freelist; 2698 counters = page->counters; 2699 2700 new.counters = counters; 2701 VM_BUG_ON(!new.frozen); 2702 2703 new.inuse = page->objects; 2704 new.frozen = freelist != NULL; 2705 2706 } while (!__cmpxchg_double_slab(s, page, 2707 freelist, counters, 2708 NULL, new.counters, 2709 "get_freelist")); 2710 2711 return freelist; 2712 } 2713 2714 /* 2715 * Slow path. The lockless freelist is empty or we need to perform 2716 * debugging duties. 2717 * 2718 * Processing is still very fast if new objects have been freed to the 2719 * regular freelist. In that case we simply take over the regular freelist 2720 * as the lockless freelist and zap the regular freelist. 2721 * 2722 * If that is not working then we fall back to the partial lists. We take the 2723 * first element of the freelist as the object to allocate now and move the 2724 * rest of the freelist to the lockless freelist. 2725 * 2726 * And if we were unable to get a new slab from the partial slab lists then 2727 * we need to allocate a new slab. This is the slowest path since it involves 2728 * a call to the page allocator and the setup of a new slab. 2729 * 2730 * Version of __slab_alloc to use when we know that interrupts are 2731 * already disabled (which is the case for bulk allocation). 
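 *
 * As a rough sketch, the attempts below are tried in this order (debug,
 * node and pfmemalloc handling omitted):
 *
 *   1. c->freelist                 objects freed back to the cpu slab
 *   2. get_freelist(s, c->page)    take over the page's own freelist
 *   3. slub_percpu_partial(c)      reuse a slab from the cpu partial list
 *   4. get_partial()               take a slab off a node partial list
 *   5. new_slab()                  ask the page allocator for a new slab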
2732 */ 2733 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2734 unsigned long addr, struct kmem_cache_cpu *c) 2735 { 2736 void *freelist; 2737 struct page *page; 2738 2739 stat(s, ALLOC_SLOWPATH); 2740 2741 page = c->page; 2742 if (!page) { 2743 /* 2744 * if the node is not online or has no normal memory, just 2745 * ignore the node constraint 2746 */ 2747 if (unlikely(node != NUMA_NO_NODE && 2748 !node_isset(node, slab_nodes))) 2749 node = NUMA_NO_NODE; 2750 goto new_slab; 2751 } 2752 redo: 2753 2754 if (unlikely(!node_match(page, node))) { 2755 /* 2756 * same as above but node_match() being false already 2757 * implies node != NUMA_NO_NODE 2758 */ 2759 if (!node_isset(node, slab_nodes)) { 2760 node = NUMA_NO_NODE; 2761 goto redo; 2762 } else { 2763 stat(s, ALLOC_NODE_MISMATCH); 2764 deactivate_slab(s, page, c->freelist, c); 2765 goto new_slab; 2766 } 2767 } 2768 2769 /* 2770 * By rights, we should be searching for a slab page that was 2771 * PFMEMALLOC but right now, we are losing the pfmemalloc 2772 * information when the page leaves the per-cpu allocator 2773 */ 2774 if (unlikely(!pfmemalloc_match(page, gfpflags))) { 2775 deactivate_slab(s, page, c->freelist, c); 2776 goto new_slab; 2777 } 2778 2779 /* must check again c->freelist in case of cpu migration or IRQ */ 2780 freelist = c->freelist; 2781 if (freelist) 2782 goto load_freelist; 2783 2784 freelist = get_freelist(s, page); 2785 2786 if (!freelist) { 2787 c->page = NULL; 2788 stat(s, DEACTIVATE_BYPASS); 2789 goto new_slab; 2790 } 2791 2792 stat(s, ALLOC_REFILL); 2793 2794 load_freelist: 2795 /* 2796 * freelist is pointing to the list of objects to be used. 2797 * page is pointing to the page from which the objects are obtained. 2798 * That page must be frozen for per cpu allocations to work. 2799 */ 2800 VM_BUG_ON(!c->page->frozen); 2801 c->freelist = get_freepointer(s, freelist); 2802 c->tid = next_tid(c->tid); 2803 return freelist; 2804 2805 new_slab: 2806 2807 if (slub_percpu_partial(c)) { 2808 page = c->page = slub_percpu_partial(c); 2809 slub_set_percpu_partial(c, page); 2810 stat(s, CPU_PARTIAL_ALLOC); 2811 goto redo; 2812 } 2813 2814 freelist = new_slab_objects(s, gfpflags, node, &c); 2815 2816 if (unlikely(!freelist)) { 2817 slab_out_of_memory(s, gfpflags, node); 2818 return NULL; 2819 } 2820 2821 page = c->page; 2822 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) 2823 goto load_freelist; 2824 2825 /* Only entered in the debug case */ 2826 if (kmem_cache_debug(s) && 2827 !alloc_debug_processing(s, page, freelist, addr)) 2828 goto new_slab; /* Slab failed checks. Next slab needed */ 2829 2830 deactivate_slab(s, page, get_freepointer(s, freelist), c); 2831 return freelist; 2832 } 2833 2834 /* 2835 * Another one that disabled interrupt and compensates for possible 2836 * cpu changes by refetching the per cpu area pointer. 2837 */ 2838 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2839 unsigned long addr, struct kmem_cache_cpu *c) 2840 { 2841 void *p; 2842 unsigned long flags; 2843 2844 local_irq_save(flags); 2845 #ifdef CONFIG_PREEMPTION 2846 /* 2847 * We may have been preempted and rescheduled on a different 2848 * cpu before disabling interrupts. Need to reload cpu area 2849 * pointer. 
	 */
	c = this_cpu_ptr(s->cpu_slab);
#endif

	p = ___slab_alloc(s, gfpflags, node, addr, c);
	local_irq_restore(flags);
	return p;
}

/*
 * If the object has been wiped upon free, make sure it's fully initialized by
 * zeroing out freelist pointer.
 */
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
						   void *obj)
{
	if (unlikely(slab_want_init_on_free(s)) && obj)
		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
			0, sizeof(void *));
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
	void *object;
	struct kmem_cache_cpu *c;
	struct page *page;
	unsigned long tid;
	struct obj_cgroup *objcg = NULL;
	bool init = false;

	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
	if (!s)
		return NULL;

	object = kfence_alloc(s, orig_size, gfpflags);
	if (unlikely(object))
		goto out;

redo:
	/*
	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
	 * enabled. We may switch back and forth between cpus while
	 * reading from one cpu area. That does not matter as long
	 * as we end up on the original cpu again when doing the cmpxchg.
	 *
	 * We must guarantee that tid and kmem_cache_cpu are retrieved on
	 * the same cpu. They could differ if CONFIG_PREEMPTION is enabled,
	 * so we need to check that they match.
	 */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	/*
	 * The irqless object alloc/free algorithm used here depends on the
	 * order in which cpu_slab's data is fetched. tid must be fetched
	 * before anything else on c to guarantee that an object and page
	 * associated with a previous tid won't be used with the current
	 * tid. Since tid is fetched first, the object and page we read may
	 * already belong to the next tid; in that case the cmpxchg with our
	 * tid fails and we simply retry, so there is no problem.
	 */
	barrier();

	/*
	 * The transaction ids are globally unique per cpu and per operation
	 * on a per cpu queue. Thus they guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on
	 * the linked list in between.
	 */

	object = c->freelist;
	page = c->page;
	if (unlikely(!object || !page || !node_match(page, node))) {
		object = __slab_alloc(s, gfpflags, node, addr, c);
	} else {
		void *next_object = get_freepointer_safe(s, object);

		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
		 *
		 * The cmpxchg does the following atomically (without lock
		 * semantics!)
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2.
Verify that tid and freelist have not been changed 2948 * 3. If they were not changed replace tid and freelist 2949 * 2950 * Since this is without lock semantics the protection is only 2951 * against code executing on this cpu *not* from access by 2952 * other cpus. 2953 */ 2954 if (unlikely(!this_cpu_cmpxchg_double( 2955 s->cpu_slab->freelist, s->cpu_slab->tid, 2956 object, tid, 2957 next_object, next_tid(tid)))) { 2958 2959 note_cmpxchg_failure("slab_alloc", s, tid); 2960 goto redo; 2961 } 2962 prefetch_freepointer(s, next_object); 2963 stat(s, ALLOC_FASTPATH); 2964 } 2965 2966 maybe_wipe_obj_freeptr(s, object); 2967 init = slab_want_init_on_alloc(gfpflags, s); 2968 2969 out: 2970 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); 2971 2972 return object; 2973 } 2974 2975 static __always_inline void *slab_alloc(struct kmem_cache *s, 2976 gfp_t gfpflags, unsigned long addr, size_t orig_size) 2977 { 2978 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); 2979 } 2980 2981 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2982 { 2983 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); 2984 2985 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 2986 s->size, gfpflags); 2987 2988 return ret; 2989 } 2990 EXPORT_SYMBOL(kmem_cache_alloc); 2991 2992 #ifdef CONFIG_TRACING 2993 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2994 { 2995 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); 2996 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2997 ret = kasan_kmalloc(s, ret, size, gfpflags); 2998 return ret; 2999 } 3000 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3001 #endif 3002 3003 #ifdef CONFIG_NUMA 3004 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3005 { 3006 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); 3007 3008 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3009 s->object_size, s->size, gfpflags, node); 3010 3011 return ret; 3012 } 3013 EXPORT_SYMBOL(kmem_cache_alloc_node); 3014 3015 #ifdef CONFIG_TRACING 3016 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 3017 gfp_t gfpflags, 3018 int node, size_t size) 3019 { 3020 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); 3021 3022 trace_kmalloc_node(_RET_IP_, ret, 3023 size, s->size, gfpflags, node); 3024 3025 ret = kasan_kmalloc(s, ret, size, gfpflags); 3026 return ret; 3027 } 3028 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3029 #endif 3030 #endif /* CONFIG_NUMA */ 3031 3032 /* 3033 * Slow path handling. This may still be called frequently since objects 3034 * have a longer lifetime than the cpu slabs in most processing loads. 3035 * 3036 * So we still attempt to reduce cache line usage. Just take the slab 3037 * lock and free the item. If there is no additional partial page 3038 * handling required then we can return immediately. 
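 *
 * Note that the node list_lock is only taken when the slab has to move
 * between lists or is about to become empty; a free into a frozen slab,
 * or into a slab that stays on the same list, gets by with the cmpxchg
 * alone.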
3039 */ 3040 static void __slab_free(struct kmem_cache *s, struct page *page, 3041 void *head, void *tail, int cnt, 3042 unsigned long addr) 3043 3044 { 3045 void *prior; 3046 int was_frozen; 3047 struct page new; 3048 unsigned long counters; 3049 struct kmem_cache_node *n = NULL; 3050 unsigned long flags; 3051 3052 stat(s, FREE_SLOWPATH); 3053 3054 if (kfence_free(head)) 3055 return; 3056 3057 if (kmem_cache_debug(s) && 3058 !free_debug_processing(s, page, head, tail, cnt, addr)) 3059 return; 3060 3061 do { 3062 if (unlikely(n)) { 3063 spin_unlock_irqrestore(&n->list_lock, flags); 3064 n = NULL; 3065 } 3066 prior = page->freelist; 3067 counters = page->counters; 3068 set_freepointer(s, tail, prior); 3069 new.counters = counters; 3070 was_frozen = new.frozen; 3071 new.inuse -= cnt; 3072 if ((!new.inuse || !prior) && !was_frozen) { 3073 3074 if (kmem_cache_has_cpu_partial(s) && !prior) { 3075 3076 /* 3077 * Slab was on no list before and will be 3078 * partially empty 3079 * We can defer the list move and instead 3080 * freeze it. 3081 */ 3082 new.frozen = 1; 3083 3084 } else { /* Needs to be taken off a list */ 3085 3086 n = get_node(s, page_to_nid(page)); 3087 /* 3088 * Speculatively acquire the list_lock. 3089 * If the cmpxchg does not succeed then we may 3090 * drop the list_lock without any processing. 3091 * 3092 * Otherwise the list_lock will synchronize with 3093 * other processors updating the list of slabs. 3094 */ 3095 spin_lock_irqsave(&n->list_lock, flags); 3096 3097 } 3098 } 3099 3100 } while (!cmpxchg_double_slab(s, page, 3101 prior, counters, 3102 head, new.counters, 3103 "__slab_free")); 3104 3105 if (likely(!n)) { 3106 3107 if (likely(was_frozen)) { 3108 /* 3109 * The list lock was not taken therefore no list 3110 * activity can be necessary. 3111 */ 3112 stat(s, FREE_FROZEN); 3113 } else if (new.frozen) { 3114 /* 3115 * If we just froze the page then put it onto the 3116 * per cpu partial list. 3117 */ 3118 put_cpu_partial(s, page, 1); 3119 stat(s, CPU_PARTIAL_FREE); 3120 } 3121 3122 return; 3123 } 3124 3125 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3126 goto slab_empty; 3127 3128 /* 3129 * Objects left in the slab. If it was not on the partial list before 3130 * then add it. 3131 */ 3132 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3133 remove_full(s, n, page); 3134 add_partial(n, page, DEACTIVATE_TO_TAIL); 3135 stat(s, FREE_ADD_PARTIAL); 3136 } 3137 spin_unlock_irqrestore(&n->list_lock, flags); 3138 return; 3139 3140 slab_empty: 3141 if (prior) { 3142 /* 3143 * Slab on the partial list. 3144 */ 3145 remove_partial(n, page); 3146 stat(s, FREE_REMOVE_PARTIAL); 3147 } else { 3148 /* Slab must be on the full list */ 3149 remove_full(s, n, page); 3150 } 3151 3152 spin_unlock_irqrestore(&n->list_lock, flags); 3153 stat(s, FREE_SLAB); 3154 discard_slab(s, page); 3155 } 3156 3157 /* 3158 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3159 * can perform fastpath freeing without additional function calls. 3160 * 3161 * The fastpath is only possible if we are freeing to the current cpu slab 3162 * of this processor. This typically the case if we have just allocated 3163 * the item before. 3164 * 3165 * If fastpath is not possible then fall back to __slab_free where we deal 3166 * with all sorts of special processing. 3167 * 3168 * Bulk free of a freelist with several objects (all pointing to the 3169 * same page) possible by specifying head and tail ptr, plus objects 3170 * count (cnt). 
Bulk free indicated by tail pointer being set.
 */
static __always_inline void do_slab_free(struct kmem_cache *s,
				struct page *page, void *head, void *tail,
				int cnt, unsigned long addr)
{
	void *tail_obj = tail ? : head;
	struct kmem_cache_cpu *c;
	unsigned long tid;

	memcg_slab_free_hook(s, &head, 1);
redo:
	/*
	 * Determine the current cpu's per cpu slab.
	 * The cpu may change afterward. However that does not matter since
	 * data is retrieved via this pointer. If we are on the same cpu
	 * during the cmpxchg then the free will succeed.
	 */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	/* Same as the comment on barrier() in slab_alloc_node() */
	barrier();

	if (likely(page == c->page)) {
		void **freelist = READ_ONCE(c->freelist);

		set_freepointer(s, tail_obj, freelist);

		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				freelist, tid,
				head, next_tid(tid)))) {

			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, head, tail_obj, cnt, addr);

}

static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
				      void *head, void *tail, int cnt,
				      unsigned long addr)
{
	/*
	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
	 * to remove objects whose reuse must be delayed.
	 */
	if (slab_free_freelist_hook(s, &head, &tail))
		do_slab_free(s, page, head, tail, cnt, addr);
}

#ifdef CONFIG_KASAN_GENERIC
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
}
#endif

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	s = cache_from_obj(s, x);
	if (!s)
		return;
	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
	trace_kmem_cache_free(_RET_IP_, x, s->name);
}
EXPORT_SYMBOL(kmem_cache_free);

struct detached_freelist {
	struct page *page;
	void *tail;
	void *freelist;
	int cnt;
	struct kmem_cache *s;
};

/*
 * This function progressively scans the array of free objects (with
 * a limited look ahead) and extracts the objects that belong to the
 * same page. It builds a detached freelist directly within the given
 * page/objects. This can happen without any need for synchronization,
 * because the objects are owned by the running process.
 * The freelist is built up as a singly linked list in the objects.
 * The idea is that this detached freelist can then be bulk
 * transferred to the real freelist(s), requiring only a single
 * synchronization primitive. Look ahead in the array is limited for
 * performance reasons.
 */
static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
			    void **p, struct detached_freelist *df)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;
	struct page *page;

	/* Always re-init detached_freelist */
	df->page = NULL;

	do {
		object = p[--size];
		/* Do we need !ZERO_OR_NULL_PTR(object) here?
(for kfree) */ 3280 } while (!object && size); 3281 3282 if (!object) 3283 return 0; 3284 3285 page = virt_to_head_page(object); 3286 if (!s) { 3287 /* Handle kalloc'ed objects */ 3288 if (unlikely(!PageSlab(page))) { 3289 BUG_ON(!PageCompound(page)); 3290 kfree_hook(object); 3291 __free_pages(page, compound_order(page)); 3292 p[size] = NULL; /* mark object processed */ 3293 return size; 3294 } 3295 /* Derive kmem_cache from object */ 3296 df->s = page->slab_cache; 3297 } else { 3298 df->s = cache_from_obj(s, object); /* Support for memcg */ 3299 } 3300 3301 if (is_kfence_address(object)) { 3302 slab_free_hook(df->s, object, false); 3303 __kfence_free(object); 3304 p[size] = NULL; /* mark object processed */ 3305 return size; 3306 } 3307 3308 /* Start new detached freelist */ 3309 df->page = page; 3310 set_freepointer(df->s, object, NULL); 3311 df->tail = object; 3312 df->freelist = object; 3313 p[size] = NULL; /* mark object processed */ 3314 df->cnt = 1; 3315 3316 while (size) { 3317 object = p[--size]; 3318 if (!object) 3319 continue; /* Skip processed objects */ 3320 3321 /* df->page is always set at this point */ 3322 if (df->page == virt_to_head_page(object)) { 3323 /* Opportunity build freelist */ 3324 set_freepointer(df->s, object, df->freelist); 3325 df->freelist = object; 3326 df->cnt++; 3327 p[size] = NULL; /* mark object processed */ 3328 3329 continue; 3330 } 3331 3332 /* Limit look ahead search */ 3333 if (!--lookahead) 3334 break; 3335 3336 if (!first_skipped_index) 3337 first_skipped_index = size + 1; 3338 } 3339 3340 return first_skipped_index; 3341 } 3342 3343 /* Note that interrupts must be enabled when calling this function. */ 3344 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3345 { 3346 if (WARN_ON(!size)) 3347 return; 3348 3349 memcg_slab_free_hook(s, p, size); 3350 do { 3351 struct detached_freelist df; 3352 3353 size = build_detached_freelist(s, size, p, &df); 3354 if (!df.page) 3355 continue; 3356 3357 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 3358 } while (likely(size)); 3359 } 3360 EXPORT_SYMBOL(kmem_cache_free_bulk); 3361 3362 /* Note that interrupts must be enabled when calling this function. */ 3363 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3364 void **p) 3365 { 3366 struct kmem_cache_cpu *c; 3367 int i; 3368 struct obj_cgroup *objcg = NULL; 3369 3370 /* memcg and kmem_cache debug support */ 3371 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3372 if (unlikely(!s)) 3373 return false; 3374 /* 3375 * Drain objects in the per cpu slab, while disabling local 3376 * IRQs, which protects against PREEMPT and interrupts 3377 * handlers invoking normal fastpath. 3378 */ 3379 local_irq_disable(); 3380 c = this_cpu_ptr(s->cpu_slab); 3381 3382 for (i = 0; i < size; i++) { 3383 void *object = kfence_alloc(s, s->object_size, flags); 3384 3385 if (unlikely(object)) { 3386 p[i] = object; 3387 continue; 3388 } 3389 3390 object = c->freelist; 3391 if (unlikely(!object)) { 3392 /* 3393 * We may have removed an object from c->freelist using 3394 * the fastpath in the previous iteration; in that case, 3395 * c->tid has not been bumped yet. 3396 * Since ___slab_alloc() may reenable interrupts while 3397 * allocating memory, we should bump c->tid now. 
			 */
			c->tid = next_tid(c->tid);

			/*
			 * Invoking the slow path likely has the side effect
			 * of re-populating the per CPU c->freelist.
			 */
			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
					    _RET_IP_, c);
			if (unlikely(!p[i]))
				goto error;

			c = this_cpu_ptr(s->cpu_slab);
			maybe_wipe_obj_freeptr(s, p[i]);

			continue; /* goto for-loop */
		}
		c->freelist = get_freepointer(s, object);
		p[i] = object;
		maybe_wipe_obj_freeptr(s, p[i]);
	}
	c->tid = next_tid(c->tid);
	local_irq_enable();

	/*
	 * memcg and kmem_cache debug support and memory initialization.
	 * Done outside of the IRQ disabled fastpath loop.
	 */
	slab_post_alloc_hook(s, objcg, flags, size, p,
				slab_want_init_on_alloc(flags, s));
	return i;
error:
	local_irq_enable();
	slab_post_alloc_hook(s, objcg, flags, i, p, false);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);


/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static unsigned int slub_min_order;
static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static unsigned int slub_min_objects;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has a significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists, which requires taking the list_lock. This
 * is less of a concern for large slabs, though, which are rarely used.
 *
 * slub_max_order specifies the order at which we stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
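 *
 * As a rough illustration of the leftover test alone (assuming 4K pages
 * and ignoring the min_objects lower bound): for a 704 byte object an
 * order 0 slab wastes 4096 % 704 = 576 bytes, more than 4096 / 16 = 256,
 * so a higher order is tried; order 1 wastes 8192 % 704 = 448 bytes,
 * which is within 8192 / 16 = 512 and is therefore acceptable.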
3485 */ 3486 static inline unsigned int slab_order(unsigned int size, 3487 unsigned int min_objects, unsigned int max_order, 3488 unsigned int fract_leftover) 3489 { 3490 unsigned int min_order = slub_min_order; 3491 unsigned int order; 3492 3493 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 3494 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 3495 3496 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); 3497 order <= max_order; order++) { 3498 3499 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 3500 unsigned int rem; 3501 3502 rem = slab_size % size; 3503 3504 if (rem <= slab_size / fract_leftover) 3505 break; 3506 } 3507 3508 return order; 3509 } 3510 3511 static inline int calculate_order(unsigned int size) 3512 { 3513 unsigned int order; 3514 unsigned int min_objects; 3515 unsigned int max_objects; 3516 unsigned int nr_cpus; 3517 3518 /* 3519 * Attempt to find best configuration for a slab. This 3520 * works by first attempting to generate a layout with 3521 * the best configuration and backing off gradually. 3522 * 3523 * First we increase the acceptable waste in a slab. Then 3524 * we reduce the minimum objects required in a slab. 3525 */ 3526 min_objects = slub_min_objects; 3527 if (!min_objects) { 3528 /* 3529 * Some architectures will only update present cpus when 3530 * onlining them, so don't trust the number if it's just 1. But 3531 * we also don't want to use nr_cpu_ids always, as on some other 3532 * architectures, there can be many possible cpus, but never 3533 * onlined. Here we compromise between trying to avoid too high 3534 * order on systems that appear larger than they are, and too 3535 * low order on systems that appear smaller than they are. 3536 */ 3537 nr_cpus = num_present_cpus(); 3538 if (nr_cpus <= 1) 3539 nr_cpus = nr_cpu_ids; 3540 min_objects = 4 * (fls(nr_cpus) + 1); 3541 } 3542 max_objects = order_objects(slub_max_order, size); 3543 min_objects = min(min_objects, max_objects); 3544 3545 while (min_objects > 1) { 3546 unsigned int fraction; 3547 3548 fraction = 16; 3549 while (fraction >= 4) { 3550 order = slab_order(size, min_objects, 3551 slub_max_order, fraction); 3552 if (order <= slub_max_order) 3553 return order; 3554 fraction /= 2; 3555 } 3556 min_objects--; 3557 } 3558 3559 /* 3560 * We were unable to place multiple objects in a slab. Now 3561 * lets see if we can place a single object there. 3562 */ 3563 order = slab_order(size, 1, slub_max_order, 1); 3564 if (order <= slub_max_order) 3565 return order; 3566 3567 /* 3568 * Doh this slab cannot be placed using slub_max_order. 3569 */ 3570 order = slab_order(size, 1, MAX_ORDER, 1); 3571 if (order < MAX_ORDER) 3572 return order; 3573 return -ENOSYS; 3574 } 3575 3576 static void 3577 init_kmem_cache_node(struct kmem_cache_node *n) 3578 { 3579 n->nr_partial = 0; 3580 spin_lock_init(&n->list_lock); 3581 INIT_LIST_HEAD(&n->partial); 3582 #ifdef CONFIG_SLUB_DEBUG 3583 atomic_long_set(&n->nr_slabs, 0); 3584 atomic_long_set(&n->total_objects, 0); 3585 INIT_LIST_HEAD(&n->full); 3586 #endif 3587 } 3588 3589 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3590 { 3591 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3592 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3593 3594 /* 3595 * Must align to double word boundary for the double cmpxchg 3596 * instructions to work; see __pcpu_double_call_return_bool(). 
3597 */ 3598 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3599 2 * sizeof(void *)); 3600 3601 if (!s->cpu_slab) 3602 return 0; 3603 3604 init_kmem_cache_cpus(s); 3605 3606 return 1; 3607 } 3608 3609 static struct kmem_cache *kmem_cache_node; 3610 3611 /* 3612 * No kmalloc_node yet so do it by hand. We know that this is the first 3613 * slab on the node for this slabcache. There are no concurrent accesses 3614 * possible. 3615 * 3616 * Note that this function only works on the kmem_cache_node 3617 * when allocating for the kmem_cache_node. This is used for bootstrapping 3618 * memory on a fresh node that has no slab structures yet. 3619 */ 3620 static void early_kmem_cache_node_alloc(int node) 3621 { 3622 struct page *page; 3623 struct kmem_cache_node *n; 3624 3625 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3626 3627 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3628 3629 BUG_ON(!page); 3630 if (page_to_nid(page) != node) { 3631 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3632 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3633 } 3634 3635 n = page->freelist; 3636 BUG_ON(!n); 3637 #ifdef CONFIG_SLUB_DEBUG 3638 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3639 init_tracking(kmem_cache_node, n); 3640 #endif 3641 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 3642 page->freelist = get_freepointer(kmem_cache_node, n); 3643 page->inuse = 1; 3644 page->frozen = 0; 3645 kmem_cache_node->node[node] = n; 3646 init_kmem_cache_node(n); 3647 inc_slabs_node(kmem_cache_node, node, page->objects); 3648 3649 /* 3650 * No locks need to be taken here as it has just been 3651 * initialized and there is no concurrent access. 3652 */ 3653 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3654 } 3655 3656 static void free_kmem_cache_nodes(struct kmem_cache *s) 3657 { 3658 int node; 3659 struct kmem_cache_node *n; 3660 3661 for_each_kmem_cache_node(s, node, n) { 3662 s->node[node] = NULL; 3663 kmem_cache_free(kmem_cache_node, n); 3664 } 3665 } 3666 3667 void __kmem_cache_release(struct kmem_cache *s) 3668 { 3669 cache_random_seq_destroy(s); 3670 free_percpu(s->cpu_slab); 3671 free_kmem_cache_nodes(s); 3672 } 3673 3674 static int init_kmem_cache_nodes(struct kmem_cache *s) 3675 { 3676 int node; 3677 3678 for_each_node_mask(node, slab_nodes) { 3679 struct kmem_cache_node *n; 3680 3681 if (slab_state == DOWN) { 3682 early_kmem_cache_node_alloc(node); 3683 continue; 3684 } 3685 n = kmem_cache_alloc_node(kmem_cache_node, 3686 GFP_KERNEL, node); 3687 3688 if (!n) { 3689 free_kmem_cache_nodes(s); 3690 return 0; 3691 } 3692 3693 init_kmem_cache_node(n); 3694 s->node[node] = n; 3695 } 3696 return 1; 3697 } 3698 3699 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3700 { 3701 if (min < MIN_PARTIAL) 3702 min = MIN_PARTIAL; 3703 else if (min > MAX_PARTIAL) 3704 min = MAX_PARTIAL; 3705 s->min_partial = min; 3706 } 3707 3708 static void set_cpu_partial(struct kmem_cache *s) 3709 { 3710 #ifdef CONFIG_SLUB_CPU_PARTIAL 3711 /* 3712 * cpu_partial determined the maximum number of objects kept in the 3713 * per cpu partial lists of a processor. 3714 * 3715 * Per cpu partial lists mainly contain slabs that just have one 3716 * object freed. If they are used for allocation then they can be 3717 * filled up again with minimal effort. The slab will never hit the 3718 * per node partial lists and therefore no locking will be required. 
3719 * 3720 * This setting also determines 3721 * 3722 * A) The number of objects from per cpu partial slabs dumped to the 3723 * per node list when we reach the limit. 3724 * B) The number of objects in cpu partial slabs to extract from the 3725 * per node list when we run out of per cpu objects. We only fetch 3726 * 50% to keep some capacity around for frees. 3727 */ 3728 if (!kmem_cache_has_cpu_partial(s)) 3729 slub_set_cpu_partial(s, 0); 3730 else if (s->size >= PAGE_SIZE) 3731 slub_set_cpu_partial(s, 2); 3732 else if (s->size >= 1024) 3733 slub_set_cpu_partial(s, 6); 3734 else if (s->size >= 256) 3735 slub_set_cpu_partial(s, 13); 3736 else 3737 slub_set_cpu_partial(s, 30); 3738 #endif 3739 } 3740 3741 /* 3742 * calculate_sizes() determines the order and the distribution of data within 3743 * a slab object. 3744 */ 3745 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3746 { 3747 slab_flags_t flags = s->flags; 3748 unsigned int size = s->object_size; 3749 unsigned int order; 3750 3751 /* 3752 * Round up object size to the next word boundary. We can only 3753 * place the free pointer at word boundaries and this determines 3754 * the possible location of the free pointer. 3755 */ 3756 size = ALIGN(size, sizeof(void *)); 3757 3758 #ifdef CONFIG_SLUB_DEBUG 3759 /* 3760 * Determine if we can poison the object itself. If the user of 3761 * the slab may touch the object after free or before allocation 3762 * then we should never poison the object itself. 3763 */ 3764 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 3765 !s->ctor) 3766 s->flags |= __OBJECT_POISON; 3767 else 3768 s->flags &= ~__OBJECT_POISON; 3769 3770 3771 /* 3772 * If we are Redzoning then check if there is some space between the 3773 * end of the object and the free pointer. If not then add an 3774 * additional word to have some bytes to store Redzone information. 3775 */ 3776 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 3777 size += sizeof(void *); 3778 #endif 3779 3780 /* 3781 * With that we have determined the number of bytes in actual use 3782 * by the object and redzoning. 3783 */ 3784 s->inuse = size; 3785 3786 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 3787 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 3788 s->ctor) { 3789 /* 3790 * Relocate free pointer after the object if it is not 3791 * permitted to overwrite the first word of the object on 3792 * kmem_cache_free. 3793 * 3794 * This is the case if we do RCU, have a constructor or 3795 * destructor, are poisoning the objects, or are 3796 * redzoning an object smaller than sizeof(void *). 3797 * 3798 * The assumption that s->offset >= s->inuse means free 3799 * pointer is outside of the object is used in the 3800 * freeptr_outside_object() function. If that is no 3801 * longer true, the function needs to be modified. 3802 */ 3803 s->offset = size; 3804 size += sizeof(void *); 3805 } else { 3806 /* 3807 * Store freelist pointer near middle of object to keep 3808 * it away from the edges of the object to avoid small 3809 * sized over/underflows from neighboring allocations. 3810 */ 3811 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 3812 } 3813 3814 #ifdef CONFIG_SLUB_DEBUG 3815 if (flags & SLAB_STORE_USER) 3816 /* 3817 * Need to store information about allocs and frees after 3818 * the object. 
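		 * (One struct track records the last allocation, the
		 * other the last free.)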
3819 */ 3820 size += 2 * sizeof(struct track); 3821 #endif 3822 3823 kasan_cache_create(s, &size, &s->flags); 3824 #ifdef CONFIG_SLUB_DEBUG 3825 if (flags & SLAB_RED_ZONE) { 3826 /* 3827 * Add some empty padding so that we can catch 3828 * overwrites from earlier objects rather than let 3829 * tracking information or the free pointer be 3830 * corrupted if a user writes before the start 3831 * of the object. 3832 */ 3833 size += sizeof(void *); 3834 3835 s->red_left_pad = sizeof(void *); 3836 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 3837 size += s->red_left_pad; 3838 } 3839 #endif 3840 3841 /* 3842 * SLUB stores one object immediately after another beginning from 3843 * offset 0. In order to align the objects we have to simply size 3844 * each object to conform to the alignment. 3845 */ 3846 size = ALIGN(size, s->align); 3847 s->size = size; 3848 s->reciprocal_size = reciprocal_value(size); 3849 if (forced_order >= 0) 3850 order = forced_order; 3851 else 3852 order = calculate_order(size); 3853 3854 if ((int)order < 0) 3855 return 0; 3856 3857 s->allocflags = 0; 3858 if (order) 3859 s->allocflags |= __GFP_COMP; 3860 3861 if (s->flags & SLAB_CACHE_DMA) 3862 s->allocflags |= GFP_DMA; 3863 3864 if (s->flags & SLAB_CACHE_DMA32) 3865 s->allocflags |= GFP_DMA32; 3866 3867 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3868 s->allocflags |= __GFP_RECLAIMABLE; 3869 3870 /* 3871 * Determine the number of objects per slab 3872 */ 3873 s->oo = oo_make(order, size); 3874 s->min = oo_make(get_order(size), size); 3875 if (oo_objects(s->oo) > oo_objects(s->max)) 3876 s->max = s->oo; 3877 3878 return !!oo_objects(s->oo); 3879 } 3880 3881 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3882 { 3883 s->flags = kmem_cache_flags(s->size, flags, s->name); 3884 #ifdef CONFIG_SLAB_FREELIST_HARDENED 3885 s->random = get_random_long(); 3886 #endif 3887 3888 if (!calculate_sizes(s, -1)) 3889 goto error; 3890 if (disable_higher_order_debug) { 3891 /* 3892 * Disable debugging flags that store metadata if the min slab 3893 * order increased. 3894 */ 3895 if (get_order(s->size) > get_order(s->object_size)) { 3896 s->flags &= ~DEBUG_METADATA_FLAGS; 3897 s->offset = 0; 3898 if (!calculate_sizes(s, -1)) 3899 goto error; 3900 } 3901 } 3902 3903 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 3904 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 3905 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 3906 /* Enable fast mode */ 3907 s->flags |= __CMPXCHG_DOUBLE; 3908 #endif 3909 3910 /* 3911 * The larger the object size is, the more pages we want on the partial 3912 * list to avoid pounding the page allocator excessively. 
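	 *
	 * For instance, ilog2(s->size) / 2 evaluates to 3 for a 64 byte
	 * object and to 6 for a 4096 byte one; set_min_partial() then
	 * clamps the value to the MIN_PARTIAL..MAX_PARTIAL (5..10) range.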
3913 	 */
3914 	set_min_partial(s, ilog2(s->size) / 2);
3915 
3916 	set_cpu_partial(s);
3917 
3918 #ifdef CONFIG_NUMA
3919 	s->remote_node_defrag_ratio = 1000;
3920 #endif
3921 
3922 	/* Initialize the pre-computed randomized freelist if slab is up */
3923 	if (slab_state >= UP) {
3924 		if (init_cache_random_seq(s))
3925 			goto error;
3926 	}
3927 
3928 	if (!init_kmem_cache_nodes(s))
3929 		goto error;
3930 
3931 	if (alloc_kmem_cache_cpus(s))
3932 		return 0;
3933 
3934 	free_kmem_cache_nodes(s);
3935 error:
3936 	return -EINVAL;
3937 }
3938 
3939 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3940 			      const char *text)
3941 {
3942 #ifdef CONFIG_SLUB_DEBUG
3943 	void *addr = page_address(page);
3944 	unsigned long *map;
3945 	void *p;
3946 
3947 	slab_err(s, page, text, s->name);
3948 	slab_lock(page);
3949 
3950 	map = get_map(s, page);
3951 	for_each_object(p, s, addr, page->objects) {
3952 
3953 		if (!test_bit(__obj_to_index(s, addr, p), map)) {
3954 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
3955 			print_tracking(s, p);
3956 		}
3957 	}
3958 	put_map(map);
3959 	slab_unlock(page);
3960 #endif
3961 }
3962 
3963 /*
3964  * Attempt to free all partial slabs on a node.
3965  * This is called from __kmem_cache_shutdown(). We must take list_lock
3966  * because a sysfs file may still be reading the partial list during shutdown.
3967  */
3968 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3969 {
3970 	LIST_HEAD(discard);
3971 	struct page *page, *h;
3972 
3973 	BUG_ON(irqs_disabled());
3974 	spin_lock_irq(&n->list_lock);
3975 	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3976 		if (!page->inuse) {
3977 			remove_partial(n, page);
3978 			list_add(&page->slab_list, &discard);
3979 		} else {
3980 			list_slab_objects(s, page,
3981 			  "Objects remaining in %s on __kmem_cache_shutdown()");
3982 		}
3983 	}
3984 	spin_unlock_irq(&n->list_lock);
3985 
3986 	list_for_each_entry_safe(page, h, &discard, slab_list)
3987 		discard_slab(s, page);
3988 }
3989 
3990 bool __kmem_cache_empty(struct kmem_cache *s)
3991 {
3992 	int node;
3993 	struct kmem_cache_node *n;
3994 
3995 	for_each_kmem_cache_node(s, node, n)
3996 		if (n->nr_partial || slabs_node(s, node))
3997 			return false;
3998 	return true;
3999 }
4000 
4001 /*
4002  * Release all resources used by a slab cache.
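 *
 * Returns 0 if every slab could be released, or 1 if some node still
 * holds objects in use and the cache cannot be torn down (see the per
 * node check below).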
4003 */ 4004 int __kmem_cache_shutdown(struct kmem_cache *s) 4005 { 4006 int node; 4007 struct kmem_cache_node *n; 4008 4009 flush_all(s); 4010 /* Attempt to free all objects */ 4011 for_each_kmem_cache_node(s, node, n) { 4012 free_partial(s, n); 4013 if (n->nr_partial || slabs_node(s, node)) 4014 return 1; 4015 } 4016 return 0; 4017 } 4018 4019 #ifdef CONFIG_PRINTK 4020 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) 4021 { 4022 void *base; 4023 int __maybe_unused i; 4024 unsigned int objnr; 4025 void *objp; 4026 void *objp0; 4027 struct kmem_cache *s = page->slab_cache; 4028 struct track __maybe_unused *trackp; 4029 4030 kpp->kp_ptr = object; 4031 kpp->kp_page = page; 4032 kpp->kp_slab_cache = s; 4033 base = page_address(page); 4034 objp0 = kasan_reset_tag(object); 4035 #ifdef CONFIG_SLUB_DEBUG 4036 objp = restore_red_left(s, objp0); 4037 #else 4038 objp = objp0; 4039 #endif 4040 objnr = obj_to_index(s, page, objp); 4041 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 4042 objp = base + s->size * objnr; 4043 kpp->kp_objp = objp; 4044 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || 4045 !(s->flags & SLAB_STORE_USER)) 4046 return; 4047 #ifdef CONFIG_SLUB_DEBUG 4048 trackp = get_track(s, objp, TRACK_ALLOC); 4049 kpp->kp_ret = (void *)trackp->addr; 4050 #ifdef CONFIG_STACKTRACE 4051 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { 4052 kpp->kp_stack[i] = (void *)trackp->addrs[i]; 4053 if (!kpp->kp_stack[i]) 4054 break; 4055 } 4056 #endif 4057 #endif 4058 } 4059 #endif 4060 4061 /******************************************************************** 4062 * Kmalloc subsystem 4063 *******************************************************************/ 4064 4065 static int __init setup_slub_min_order(char *str) 4066 { 4067 get_option(&str, (int *)&slub_min_order); 4068 4069 return 1; 4070 } 4071 4072 __setup("slub_min_order=", setup_slub_min_order); 4073 4074 static int __init setup_slub_max_order(char *str) 4075 { 4076 get_option(&str, (int *)&slub_max_order); 4077 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4078 4079 return 1; 4080 } 4081 4082 __setup("slub_max_order=", setup_slub_max_order); 4083 4084 static int __init setup_slub_min_objects(char *str) 4085 { 4086 get_option(&str, (int *)&slub_min_objects); 4087 4088 return 1; 4089 } 4090 4091 __setup("slub_min_objects=", setup_slub_min_objects); 4092 4093 void *__kmalloc(size_t size, gfp_t flags) 4094 { 4095 struct kmem_cache *s; 4096 void *ret; 4097 4098 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4099 return kmalloc_large(size, flags); 4100 4101 s = kmalloc_slab(size, flags); 4102 4103 if (unlikely(ZERO_OR_NULL_PTR(s))) 4104 return s; 4105 4106 ret = slab_alloc(s, flags, _RET_IP_, size); 4107 4108 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 4109 4110 ret = kasan_kmalloc(s, ret, size, flags); 4111 4112 return ret; 4113 } 4114 EXPORT_SYMBOL(__kmalloc); 4115 4116 #ifdef CONFIG_NUMA 4117 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 4118 { 4119 struct page *page; 4120 void *ptr = NULL; 4121 unsigned int order = get_order(size); 4122 4123 flags |= __GFP_COMP; 4124 page = alloc_pages_node(node, flags, order); 4125 if (page) { 4126 ptr = page_address(page); 4127 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4128 PAGE_SIZE << order); 4129 } 4130 4131 return kmalloc_large_node_hook(ptr, size, flags); 4132 } 4133 4134 void *__kmalloc_node(size_t size, gfp_t flags, int node) 
4135 {
4136 	struct kmem_cache *s;
4137 	void *ret;
4138 
4139 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4140 		ret = kmalloc_large_node(size, flags, node);
4141 
4142 		trace_kmalloc_node(_RET_IP_, ret,
4143 				   size, PAGE_SIZE << get_order(size),
4144 				   flags, node);
4145 
4146 		return ret;
4147 	}
4148 
4149 	s = kmalloc_slab(size, flags);
4150 
4151 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4152 		return s;
4153 
4154 	ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
4155 
4156 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4157 
4158 	ret = kasan_kmalloc(s, ret, size, flags);
4159 
4160 	return ret;
4161 }
4162 EXPORT_SYMBOL(__kmalloc_node);
4163 #endif	/* CONFIG_NUMA */
4164 
4165 #ifdef CONFIG_HARDENED_USERCOPY
4166 /*
4167  * Rejects incorrectly sized objects and objects that are to be copied
4168  * to/from userspace but do not fall entirely within the containing slab
4169  * cache's usercopy region.
4170  *
4171  * Returns normally if the copy is permitted; otherwise the copy is
4172  * rejected via usercopy_abort(), which reports the name of the cache.
4173  */
4174 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4175 			 bool to_user)
4176 {
4177 	struct kmem_cache *s;
4178 	unsigned int offset;
4179 	size_t object_size;
4180 	bool is_kfence = is_kfence_address(ptr);
4181 
4182 	ptr = kasan_reset_tag(ptr);
4183 
4184 	/* Find object and usable object size. */
4185 	s = page->slab_cache;
4186 
4187 	/* Reject impossible pointers. */
4188 	if (ptr < page_address(page))
4189 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
4190 			       to_user, 0, n);
4191 
4192 	/* Find offset within object. */
4193 	if (is_kfence)
4194 		offset = ptr - kfence_object_start(ptr);
4195 	else
4196 		offset = (ptr - page_address(page)) % s->size;
4197 
4198 	/* Adjust for redzone and reject if within the redzone. */
4199 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4200 		if (offset < s->red_left_pad)
4201 			usercopy_abort("SLUB object in left red zone",
4202 				       s->name, to_user, offset, n);
4203 		offset -= s->red_left_pad;
4204 	}
4205 
4206 	/* Allow address range falling entirely within usercopy region. */
4207 	if (offset >= s->useroffset &&
4208 	    offset - s->useroffset <= s->usersize &&
4209 	    n <= s->useroffset - offset + s->usersize)
4210 		return;
4211 
4212 	/*
4213 	 * If the copy is still within the allocated object, produce
4214 	 * a warning instead of rejecting the copy. This is intended
4215 	 * to be a temporary method to find any missing usercopy
4216 	 * whitelists.
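	 *
	 * For illustration only (not part of this file), a cache usually
	 * opts a field into usercopy with something like:
	 *
	 *   s = kmem_cache_create_usercopy("foo_cache", sizeof(struct foo),
	 *				    0, SLAB_HWCACHE_ALIGN,
	 *				    offsetof(struct foo, buf),
	 *				    sizeof_field(struct foo, buf),
	 *				    NULL);
	 *
	 * which populates the s->useroffset/s->usersize window tested above.
	 * "struct foo" and its "buf" member are made-up names used purely as
	 * an example.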
4217 */ 4218 object_size = slab_ksize(s); 4219 if (usercopy_fallback && 4220 offset <= object_size && n <= object_size - offset) { 4221 usercopy_warn("SLUB object", s->name, to_user, offset, n); 4222 return; 4223 } 4224 4225 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4226 } 4227 #endif /* CONFIG_HARDENED_USERCOPY */ 4228 4229 size_t __ksize(const void *object) 4230 { 4231 struct page *page; 4232 4233 if (unlikely(object == ZERO_SIZE_PTR)) 4234 return 0; 4235 4236 page = virt_to_head_page(object); 4237 4238 if (unlikely(!PageSlab(page))) { 4239 WARN_ON(!PageCompound(page)); 4240 return page_size(page); 4241 } 4242 4243 return slab_ksize(page->slab_cache); 4244 } 4245 EXPORT_SYMBOL(__ksize); 4246 4247 void kfree(const void *x) 4248 { 4249 struct page *page; 4250 void *object = (void *)x; 4251 4252 trace_kfree(_RET_IP_, x); 4253 4254 if (unlikely(ZERO_OR_NULL_PTR(x))) 4255 return; 4256 4257 page = virt_to_head_page(x); 4258 if (unlikely(!PageSlab(page))) { 4259 unsigned int order = compound_order(page); 4260 4261 BUG_ON(!PageCompound(page)); 4262 kfree_hook(object); 4263 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4264 -(PAGE_SIZE << order)); 4265 __free_pages(page, order); 4266 return; 4267 } 4268 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 4269 } 4270 EXPORT_SYMBOL(kfree); 4271 4272 #define SHRINK_PROMOTE_MAX 32 4273 4274 /* 4275 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4276 * up most to the head of the partial lists. New allocations will then 4277 * fill those up and thus they can be removed from the partial lists. 4278 * 4279 * The slabs with the least items are placed last. This results in them 4280 * being allocated from last increasing the chance that the last objects 4281 * are freed in them. 4282 */ 4283 int __kmem_cache_shrink(struct kmem_cache *s) 4284 { 4285 int node; 4286 int i; 4287 struct kmem_cache_node *n; 4288 struct page *page; 4289 struct page *t; 4290 struct list_head discard; 4291 struct list_head promote[SHRINK_PROMOTE_MAX]; 4292 unsigned long flags; 4293 int ret = 0; 4294 4295 flush_all(s); 4296 for_each_kmem_cache_node(s, node, n) { 4297 INIT_LIST_HEAD(&discard); 4298 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4299 INIT_LIST_HEAD(promote + i); 4300 4301 spin_lock_irqsave(&n->list_lock, flags); 4302 4303 /* 4304 * Build lists of slabs to discard or promote. 4305 * 4306 * Note that concurrent frees may occur while we hold the 4307 * list_lock. page->inuse here is the upper limit. 4308 */ 4309 list_for_each_entry_safe(page, t, &n->partial, slab_list) { 4310 int free = page->objects - page->inuse; 4311 4312 /* Do not reread page->inuse */ 4313 barrier(); 4314 4315 /* We do not keep full slabs on the list */ 4316 BUG_ON(free <= 0); 4317 4318 if (free == page->objects) { 4319 list_move(&page->slab_list, &discard); 4320 n->nr_partial--; 4321 } else if (free <= SHRINK_PROMOTE_MAX) 4322 list_move(&page->slab_list, promote + free - 1); 4323 } 4324 4325 /* 4326 * Promote the slabs filled up most to the head of the 4327 * partial list. 
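		 *
		 * As a small worked example: a slab with a single free object
		 * sits on promote[0] and is spliced last, so it ends up right
		 * at the head of the partial list, while a slab with
		 * SHRINK_PROMOTE_MAX free objects is spliced first and lands
		 * further back.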
4328 		 */
4329 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4330 			list_splice(promote + i, &n->partial);
4331 
4332 		spin_unlock_irqrestore(&n->list_lock, flags);
4333 
4334 		/* Release empty slabs */
4335 		list_for_each_entry_safe(page, t, &discard, slab_list)
4336 			discard_slab(s, page);
4337 
4338 		if (slabs_node(s, node))
4339 			ret = 1;
4340 	}
4341 
4342 	return ret;
4343 }
4344 
4345 static int slab_mem_going_offline_callback(void *arg)
4346 {
4347 	struct kmem_cache *s;
4348 
4349 	mutex_lock(&slab_mutex);
4350 	list_for_each_entry(s, &slab_caches, list)
4351 		__kmem_cache_shrink(s);
4352 	mutex_unlock(&slab_mutex);
4353 
4354 	return 0;
4355 }
4356 
4357 static void slab_mem_offline_callback(void *arg)
4358 {
4359 	struct memory_notify *marg = arg;
4360 	int offline_node;
4361 
4362 	offline_node = marg->status_change_nid_normal;
4363 
4364 	/*
4365 	 * If the node still has available memory, we still need its
4366 	 * kmem_cache_node, so there is nothing to do here.
4367 	 */
4368 	if (offline_node < 0)
4369 		return;
4370 
4371 	mutex_lock(&slab_mutex);
4372 	node_clear(offline_node, slab_nodes);
4373 	/*
4374 	 * We no longer free kmem_cache_node structures here, as it would be
4375 	 * racy with all get_node() users, and infeasible to protect them with
4376 	 * slab_mutex.
4377 	 */
4378 	mutex_unlock(&slab_mutex);
4379 }
4380 
4381 static int slab_mem_going_online_callback(void *arg)
4382 {
4383 	struct kmem_cache_node *n;
4384 	struct kmem_cache *s;
4385 	struct memory_notify *marg = arg;
4386 	int nid = marg->status_change_nid_normal;
4387 	int ret = 0;
4388 
4389 	/*
4390 	 * If the node's memory is already available, then kmem_cache_node is
4391 	 * already created. Nothing to do.
4392 	 */
4393 	if (nid < 0)
4394 		return 0;
4395 
4396 	/*
4397 	 * We are bringing a node online. No memory is available yet. We must
4398 	 * allocate a kmem_cache_node structure in order to bring the node
4399 	 * online.
4400 	 */
4401 	mutex_lock(&slab_mutex);
4402 	list_for_each_entry(s, &slab_caches, list) {
4403 		/*
4404 		 * The structure may already exist if the node was previously
4405 		 * onlined and offlined.
4406 		 */
4407 		if (get_node(s, nid))
4408 			continue;
4409 		/*
4410 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
4411 		 *      since memory is not yet available from the node that
4412 		 *      is brought up.
4413 		 */
4414 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4415 		if (!n) {
4416 			ret = -ENOMEM;
4417 			goto out;
4418 		}
4419 		init_kmem_cache_node(n);
4420 		s->node[nid] = n;
4421 	}
4422 	/*
4423 	 * Any cache created after this point will also have kmem_cache_node
4424 	 * initialized for the new node.
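	 *
	 * (New caches pick the node up automatically because cache creation
	 * walks the slab_nodes mask when allocating its per node structures;
	 * see init_kmem_cache_nodes().)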
4425 */ 4426 node_set(nid, slab_nodes); 4427 out: 4428 mutex_unlock(&slab_mutex); 4429 return ret; 4430 } 4431 4432 static int slab_memory_callback(struct notifier_block *self, 4433 unsigned long action, void *arg) 4434 { 4435 int ret = 0; 4436 4437 switch (action) { 4438 case MEM_GOING_ONLINE: 4439 ret = slab_mem_going_online_callback(arg); 4440 break; 4441 case MEM_GOING_OFFLINE: 4442 ret = slab_mem_going_offline_callback(arg); 4443 break; 4444 case MEM_OFFLINE: 4445 case MEM_CANCEL_ONLINE: 4446 slab_mem_offline_callback(arg); 4447 break; 4448 case MEM_ONLINE: 4449 case MEM_CANCEL_OFFLINE: 4450 break; 4451 } 4452 if (ret) 4453 ret = notifier_from_errno(ret); 4454 else 4455 ret = NOTIFY_OK; 4456 return ret; 4457 } 4458 4459 static struct notifier_block slab_memory_callback_nb = { 4460 .notifier_call = slab_memory_callback, 4461 .priority = SLAB_CALLBACK_PRI, 4462 }; 4463 4464 /******************************************************************** 4465 * Basic setup of slabs 4466 *******************************************************************/ 4467 4468 /* 4469 * Used for early kmem_cache structures that were allocated using 4470 * the page allocator. Allocate them properly then fix up the pointers 4471 * that may be pointing to the wrong kmem_cache structure. 4472 */ 4473 4474 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4475 { 4476 int node; 4477 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4478 struct kmem_cache_node *n; 4479 4480 memcpy(s, static_cache, kmem_cache->object_size); 4481 4482 /* 4483 * This runs very early, and only the boot processor is supposed to be 4484 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4485 * IPIs around. 4486 */ 4487 __flush_cpu_slab(s, smp_processor_id()); 4488 for_each_kmem_cache_node(s, node, n) { 4489 struct page *p; 4490 4491 list_for_each_entry(p, &n->partial, slab_list) 4492 p->slab_cache = s; 4493 4494 #ifdef CONFIG_SLUB_DEBUG 4495 list_for_each_entry(p, &n->full, slab_list) 4496 p->slab_cache = s; 4497 #endif 4498 } 4499 list_add(&s->list, &slab_caches); 4500 return s; 4501 } 4502 4503 void __init kmem_cache_init(void) 4504 { 4505 static __initdata struct kmem_cache boot_kmem_cache, 4506 boot_kmem_cache_node; 4507 int node; 4508 4509 if (debug_guardpage_minorder()) 4510 slub_max_order = 0; 4511 4512 /* Print slub debugging pointers without hashing */ 4513 if (__slub_debug_enabled()) 4514 no_hash_pointers_enable(NULL); 4515 4516 kmem_cache_node = &boot_kmem_cache_node; 4517 kmem_cache = &boot_kmem_cache; 4518 4519 /* 4520 * Initialize the nodemask for which we will allocate per node 4521 * structures. Here we don't need taking slab_mutex yet. 
4522 */ 4523 for_each_node_state(node, N_NORMAL_MEMORY) 4524 node_set(node, slab_nodes); 4525 4526 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4527 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4528 4529 register_hotmemory_notifier(&slab_memory_callback_nb); 4530 4531 /* Able to allocate the per node structures */ 4532 slab_state = PARTIAL; 4533 4534 create_boot_cache(kmem_cache, "kmem_cache", 4535 offsetof(struct kmem_cache, node) + 4536 nr_node_ids * sizeof(struct kmem_cache_node *), 4537 SLAB_HWCACHE_ALIGN, 0, 0); 4538 4539 kmem_cache = bootstrap(&boot_kmem_cache); 4540 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4541 4542 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4543 setup_kmalloc_cache_index_table(); 4544 create_kmalloc_caches(0); 4545 4546 /* Setup random freelists for each cache */ 4547 init_freelist_randomization(); 4548 4549 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4550 slub_cpu_dead); 4551 4552 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4553 cache_line_size(), 4554 slub_min_order, slub_max_order, slub_min_objects, 4555 nr_cpu_ids, nr_node_ids); 4556 } 4557 4558 void __init kmem_cache_init_late(void) 4559 { 4560 } 4561 4562 struct kmem_cache * 4563 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4564 slab_flags_t flags, void (*ctor)(void *)) 4565 { 4566 struct kmem_cache *s; 4567 4568 s = find_mergeable(size, align, flags, name, ctor); 4569 if (s) { 4570 s->refcount++; 4571 4572 /* 4573 * Adjust the object sizes so that we clear 4574 * the complete object on kzalloc. 4575 */ 4576 s->object_size = max(s->object_size, size); 4577 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4578 4579 if (sysfs_slab_alias(s, name)) { 4580 s->refcount--; 4581 s = NULL; 4582 } 4583 } 4584 4585 return s; 4586 } 4587 4588 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4589 { 4590 int err; 4591 4592 err = kmem_cache_open(s, flags); 4593 if (err) 4594 return err; 4595 4596 /* Mutex is not taken during early boot */ 4597 if (slab_state <= UP) 4598 return 0; 4599 4600 err = sysfs_slab_add(s); 4601 if (err) 4602 __kmem_cache_release(s); 4603 4604 if (s->flags & SLAB_STORE_USER) 4605 debugfs_slab_add(s); 4606 4607 return err; 4608 } 4609 4610 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4611 { 4612 struct kmem_cache *s; 4613 void *ret; 4614 4615 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4616 return kmalloc_large(size, gfpflags); 4617 4618 s = kmalloc_slab(size, gfpflags); 4619 4620 if (unlikely(ZERO_OR_NULL_PTR(s))) 4621 return s; 4622 4623 ret = slab_alloc(s, gfpflags, caller, size); 4624 4625 /* Honor the call site pointer we received. */ 4626 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4627 4628 return ret; 4629 } 4630 EXPORT_SYMBOL(__kmalloc_track_caller); 4631 4632 #ifdef CONFIG_NUMA 4633 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4634 int node, unsigned long caller) 4635 { 4636 struct kmem_cache *s; 4637 void *ret; 4638 4639 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4640 ret = kmalloc_large_node(size, gfpflags, node); 4641 4642 trace_kmalloc_node(caller, ret, 4643 size, PAGE_SIZE << get_order(size), 4644 gfpflags, node); 4645 4646 return ret; 4647 } 4648 4649 s = kmalloc_slab(size, gfpflags); 4650 4651 if (unlikely(ZERO_OR_NULL_PTR(s))) 4652 return s; 4653 4654 ret = slab_alloc_node(s, gfpflags, node, caller, size); 4655 4656 /* Honor the call site pointer we received. 
*/ 4657 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4658 4659 return ret; 4660 } 4661 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4662 #endif 4663 4664 #ifdef CONFIG_SYSFS 4665 static int count_inuse(struct page *page) 4666 { 4667 return page->inuse; 4668 } 4669 4670 static int count_total(struct page *page) 4671 { 4672 return page->objects; 4673 } 4674 #endif 4675 4676 #ifdef CONFIG_SLUB_DEBUG 4677 static void validate_slab(struct kmem_cache *s, struct page *page) 4678 { 4679 void *p; 4680 void *addr = page_address(page); 4681 unsigned long *map; 4682 4683 slab_lock(page); 4684 4685 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) 4686 goto unlock; 4687 4688 /* Now we know that a valid freelist exists */ 4689 map = get_map(s, page); 4690 for_each_object(p, s, addr, page->objects) { 4691 u8 val = test_bit(__obj_to_index(s, addr, p), map) ? 4692 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4693 4694 if (!check_object(s, page, p, val)) 4695 break; 4696 } 4697 put_map(map); 4698 unlock: 4699 slab_unlock(page); 4700 } 4701 4702 static int validate_slab_node(struct kmem_cache *s, 4703 struct kmem_cache_node *n) 4704 { 4705 unsigned long count = 0; 4706 struct page *page; 4707 unsigned long flags; 4708 4709 spin_lock_irqsave(&n->list_lock, flags); 4710 4711 list_for_each_entry(page, &n->partial, slab_list) { 4712 validate_slab(s, page); 4713 count++; 4714 } 4715 if (count != n->nr_partial) { 4716 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4717 s->name, count, n->nr_partial); 4718 slab_add_kunit_errors(); 4719 } 4720 4721 if (!(s->flags & SLAB_STORE_USER)) 4722 goto out; 4723 4724 list_for_each_entry(page, &n->full, slab_list) { 4725 validate_slab(s, page); 4726 count++; 4727 } 4728 if (count != atomic_long_read(&n->nr_slabs)) { 4729 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4730 s->name, count, atomic_long_read(&n->nr_slabs)); 4731 slab_add_kunit_errors(); 4732 } 4733 4734 out: 4735 spin_unlock_irqrestore(&n->list_lock, flags); 4736 return count; 4737 } 4738 4739 long validate_slab_cache(struct kmem_cache *s) 4740 { 4741 int node; 4742 unsigned long count = 0; 4743 struct kmem_cache_node *n; 4744 4745 flush_all(s); 4746 for_each_kmem_cache_node(s, node, n) 4747 count += validate_slab_node(s, n); 4748 4749 return count; 4750 } 4751 EXPORT_SYMBOL(validate_slab_cache); 4752 4753 #ifdef CONFIG_DEBUG_FS 4754 /* 4755 * Generate lists of code addresses where slabcache objects are allocated 4756 * and freed. 
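 *
 * Each line emitted by the debugfs files below (see slab_debugfs_show())
 * then looks roughly like this made-up, illustrative sample:
 *
 *   1234 kmem_cache_alloc+0x8c/0x120 age=10/1500/9000 pid=1-512 cpus=0-3 nodes=0
 *
 * i.e. the number of objects recorded for that call site, the call site
 * itself, and the aggregated age/pid/cpu/node data kept in struct location.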
4757 */ 4758 4759 struct location { 4760 unsigned long count; 4761 unsigned long addr; 4762 long long sum_time; 4763 long min_time; 4764 long max_time; 4765 long min_pid; 4766 long max_pid; 4767 DECLARE_BITMAP(cpus, NR_CPUS); 4768 nodemask_t nodes; 4769 }; 4770 4771 struct loc_track { 4772 unsigned long max; 4773 unsigned long count; 4774 struct location *loc; 4775 }; 4776 4777 static struct dentry *slab_debugfs_root; 4778 4779 static void free_loc_track(struct loc_track *t) 4780 { 4781 if (t->max) 4782 free_pages((unsigned long)t->loc, 4783 get_order(sizeof(struct location) * t->max)); 4784 } 4785 4786 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 4787 { 4788 struct location *l; 4789 int order; 4790 4791 order = get_order(sizeof(struct location) * max); 4792 4793 l = (void *)__get_free_pages(flags, order); 4794 if (!l) 4795 return 0; 4796 4797 if (t->count) { 4798 memcpy(l, t->loc, sizeof(struct location) * t->count); 4799 free_loc_track(t); 4800 } 4801 t->max = max; 4802 t->loc = l; 4803 return 1; 4804 } 4805 4806 static int add_location(struct loc_track *t, struct kmem_cache *s, 4807 const struct track *track) 4808 { 4809 long start, end, pos; 4810 struct location *l; 4811 unsigned long caddr; 4812 unsigned long age = jiffies - track->when; 4813 4814 start = -1; 4815 end = t->count; 4816 4817 for ( ; ; ) { 4818 pos = start + (end - start + 1) / 2; 4819 4820 /* 4821 * There is nothing at "end". If we end up there 4822 * we need to add something to before end. 4823 */ 4824 if (pos == end) 4825 break; 4826 4827 caddr = t->loc[pos].addr; 4828 if (track->addr == caddr) { 4829 4830 l = &t->loc[pos]; 4831 l->count++; 4832 if (track->when) { 4833 l->sum_time += age; 4834 if (age < l->min_time) 4835 l->min_time = age; 4836 if (age > l->max_time) 4837 l->max_time = age; 4838 4839 if (track->pid < l->min_pid) 4840 l->min_pid = track->pid; 4841 if (track->pid > l->max_pid) 4842 l->max_pid = track->pid; 4843 4844 cpumask_set_cpu(track->cpu, 4845 to_cpumask(l->cpus)); 4846 } 4847 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4848 return 1; 4849 } 4850 4851 if (track->addr < caddr) 4852 end = pos; 4853 else 4854 start = pos; 4855 } 4856 4857 /* 4858 * Not found. Insert new tracking element. 
4859 */ 4860 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4861 return 0; 4862 4863 l = t->loc + pos; 4864 if (pos < t->count) 4865 memmove(l + 1, l, 4866 (t->count - pos) * sizeof(struct location)); 4867 t->count++; 4868 l->count = 1; 4869 l->addr = track->addr; 4870 l->sum_time = age; 4871 l->min_time = age; 4872 l->max_time = age; 4873 l->min_pid = track->pid; 4874 l->max_pid = track->pid; 4875 cpumask_clear(to_cpumask(l->cpus)); 4876 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4877 nodes_clear(l->nodes); 4878 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4879 return 1; 4880 } 4881 4882 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4883 struct page *page, enum track_item alloc) 4884 { 4885 void *addr = page_address(page); 4886 void *p; 4887 unsigned long *map; 4888 4889 map = get_map(s, page); 4890 for_each_object(p, s, addr, page->objects) 4891 if (!test_bit(__obj_to_index(s, addr, p), map)) 4892 add_location(t, s, get_track(s, p, alloc)); 4893 put_map(map); 4894 } 4895 #endif /* CONFIG_DEBUG_FS */ 4896 #endif /* CONFIG_SLUB_DEBUG */ 4897 4898 #ifdef CONFIG_SYSFS 4899 enum slab_stat_type { 4900 SL_ALL, /* All slabs */ 4901 SL_PARTIAL, /* Only partially allocated slabs */ 4902 SL_CPU, /* Only slabs used for cpu caches */ 4903 SL_OBJECTS, /* Determine allocated objects not slabs */ 4904 SL_TOTAL /* Determine object capacity not slabs */ 4905 }; 4906 4907 #define SO_ALL (1 << SL_ALL) 4908 #define SO_PARTIAL (1 << SL_PARTIAL) 4909 #define SO_CPU (1 << SL_CPU) 4910 #define SO_OBJECTS (1 << SL_OBJECTS) 4911 #define SO_TOTAL (1 << SL_TOTAL) 4912 4913 static ssize_t show_slab_objects(struct kmem_cache *s, 4914 char *buf, unsigned long flags) 4915 { 4916 unsigned long total = 0; 4917 int node; 4918 int x; 4919 unsigned long *nodes; 4920 int len = 0; 4921 4922 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 4923 if (!nodes) 4924 return -ENOMEM; 4925 4926 if (flags & SO_CPU) { 4927 int cpu; 4928 4929 for_each_possible_cpu(cpu) { 4930 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 4931 cpu); 4932 int node; 4933 struct page *page; 4934 4935 page = READ_ONCE(c->page); 4936 if (!page) 4937 continue; 4938 4939 node = page_to_nid(page); 4940 if (flags & SO_TOTAL) 4941 x = page->objects; 4942 else if (flags & SO_OBJECTS) 4943 x = page->inuse; 4944 else 4945 x = 1; 4946 4947 total += x; 4948 nodes[node] += x; 4949 4950 page = slub_percpu_partial_read_once(c); 4951 if (page) { 4952 node = page_to_nid(page); 4953 if (flags & SO_TOTAL) 4954 WARN_ON_ONCE(1); 4955 else if (flags & SO_OBJECTS) 4956 WARN_ON_ONCE(1); 4957 else 4958 x = page->pages; 4959 total += x; 4960 nodes[node] += x; 4961 } 4962 } 4963 } 4964 4965 /* 4966 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 4967 * already held which will conflict with an existing lock order: 4968 * 4969 * mem_hotplug_lock->slab_mutex->kernfs_mutex 4970 * 4971 * We don't really need mem_hotplug_lock (to hold off 4972 * slab_mem_going_offline_callback) here because slab's memory hot 4973 * unplug code doesn't destroy the kmem_cache->node[] data. 
4974 */ 4975 4976 #ifdef CONFIG_SLUB_DEBUG 4977 if (flags & SO_ALL) { 4978 struct kmem_cache_node *n; 4979 4980 for_each_kmem_cache_node(s, node, n) { 4981 4982 if (flags & SO_TOTAL) 4983 x = atomic_long_read(&n->total_objects); 4984 else if (flags & SO_OBJECTS) 4985 x = atomic_long_read(&n->total_objects) - 4986 count_partial(n, count_free); 4987 else 4988 x = atomic_long_read(&n->nr_slabs); 4989 total += x; 4990 nodes[node] += x; 4991 } 4992 4993 } else 4994 #endif 4995 if (flags & SO_PARTIAL) { 4996 struct kmem_cache_node *n; 4997 4998 for_each_kmem_cache_node(s, node, n) { 4999 if (flags & SO_TOTAL) 5000 x = count_partial(n, count_total); 5001 else if (flags & SO_OBJECTS) 5002 x = count_partial(n, count_inuse); 5003 else 5004 x = n->nr_partial; 5005 total += x; 5006 nodes[node] += x; 5007 } 5008 } 5009 5010 len += sysfs_emit_at(buf, len, "%lu", total); 5011 #ifdef CONFIG_NUMA 5012 for (node = 0; node < nr_node_ids; node++) { 5013 if (nodes[node]) 5014 len += sysfs_emit_at(buf, len, " N%d=%lu", 5015 node, nodes[node]); 5016 } 5017 #endif 5018 len += sysfs_emit_at(buf, len, "\n"); 5019 kfree(nodes); 5020 5021 return len; 5022 } 5023 5024 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5025 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5026 5027 struct slab_attribute { 5028 struct attribute attr; 5029 ssize_t (*show)(struct kmem_cache *s, char *buf); 5030 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5031 }; 5032 5033 #define SLAB_ATTR_RO(_name) \ 5034 static struct slab_attribute _name##_attr = \ 5035 __ATTR(_name, 0400, _name##_show, NULL) 5036 5037 #define SLAB_ATTR(_name) \ 5038 static struct slab_attribute _name##_attr = \ 5039 __ATTR(_name, 0600, _name##_show, _name##_store) 5040 5041 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5042 { 5043 return sysfs_emit(buf, "%u\n", s->size); 5044 } 5045 SLAB_ATTR_RO(slab_size); 5046 5047 static ssize_t align_show(struct kmem_cache *s, char *buf) 5048 { 5049 return sysfs_emit(buf, "%u\n", s->align); 5050 } 5051 SLAB_ATTR_RO(align); 5052 5053 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5054 { 5055 return sysfs_emit(buf, "%u\n", s->object_size); 5056 } 5057 SLAB_ATTR_RO(object_size); 5058 5059 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5060 { 5061 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5062 } 5063 SLAB_ATTR_RO(objs_per_slab); 5064 5065 static ssize_t order_show(struct kmem_cache *s, char *buf) 5066 { 5067 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5068 } 5069 SLAB_ATTR_RO(order); 5070 5071 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5072 { 5073 return sysfs_emit(buf, "%lu\n", s->min_partial); 5074 } 5075 5076 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5077 size_t length) 5078 { 5079 unsigned long min; 5080 int err; 5081 5082 err = kstrtoul(buf, 10, &min); 5083 if (err) 5084 return err; 5085 5086 set_min_partial(s, min); 5087 return length; 5088 } 5089 SLAB_ATTR(min_partial); 5090 5091 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5092 { 5093 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s)); 5094 } 5095 5096 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5097 size_t length) 5098 { 5099 unsigned int objects; 5100 int err; 5101 5102 err = kstrtouint(buf, 10, &objects); 5103 if (err) 5104 return err; 5105 if (objects && !kmem_cache_has_cpu_partial(s)) 5106 return -EINVAL; 5107 5108 slub_set_cpu_partial(s, objects); 
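	/*
	 * Flushing drains the existing per cpu (partial) slabs so that
	 * subsequent refills observe the new cpu_partial limit right away.
	 */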
5109 flush_all(s); 5110 return length; 5111 } 5112 SLAB_ATTR(cpu_partial); 5113 5114 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5115 { 5116 if (!s->ctor) 5117 return 0; 5118 return sysfs_emit(buf, "%pS\n", s->ctor); 5119 } 5120 SLAB_ATTR_RO(ctor); 5121 5122 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5123 { 5124 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5125 } 5126 SLAB_ATTR_RO(aliases); 5127 5128 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5129 { 5130 return show_slab_objects(s, buf, SO_PARTIAL); 5131 } 5132 SLAB_ATTR_RO(partial); 5133 5134 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5135 { 5136 return show_slab_objects(s, buf, SO_CPU); 5137 } 5138 SLAB_ATTR_RO(cpu_slabs); 5139 5140 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5141 { 5142 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5143 } 5144 SLAB_ATTR_RO(objects); 5145 5146 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5147 { 5148 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5149 } 5150 SLAB_ATTR_RO(objects_partial); 5151 5152 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5153 { 5154 int objects = 0; 5155 int pages = 0; 5156 int cpu; 5157 int len = 0; 5158 5159 for_each_online_cpu(cpu) { 5160 struct page *page; 5161 5162 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5163 5164 if (page) { 5165 pages += page->pages; 5166 objects += page->pobjects; 5167 } 5168 } 5169 5170 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages); 5171 5172 #ifdef CONFIG_SMP 5173 for_each_online_cpu(cpu) { 5174 struct page *page; 5175 5176 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5177 if (page) 5178 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5179 cpu, page->pobjects, page->pages); 5180 } 5181 #endif 5182 len += sysfs_emit_at(buf, len, "\n"); 5183 5184 return len; 5185 } 5186 SLAB_ATTR_RO(slabs_cpu_partial); 5187 5188 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5189 { 5190 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5191 } 5192 SLAB_ATTR_RO(reclaim_account); 5193 5194 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5195 { 5196 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5197 } 5198 SLAB_ATTR_RO(hwcache_align); 5199 5200 #ifdef CONFIG_ZONE_DMA 5201 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5202 { 5203 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5204 } 5205 SLAB_ATTR_RO(cache_dma); 5206 #endif 5207 5208 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5209 { 5210 return sysfs_emit(buf, "%u\n", s->usersize); 5211 } 5212 SLAB_ATTR_RO(usersize); 5213 5214 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5215 { 5216 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5217 } 5218 SLAB_ATTR_RO(destroy_by_rcu); 5219 5220 #ifdef CONFIG_SLUB_DEBUG 5221 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5222 { 5223 return show_slab_objects(s, buf, SO_ALL); 5224 } 5225 SLAB_ATTR_RO(slabs); 5226 5227 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5228 { 5229 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5230 } 5231 SLAB_ATTR_RO(total_objects); 5232 5233 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5234 { 5235 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5236 } 5237 SLAB_ATTR_RO(sanity_checks); 5238 5239 
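/*
 * For illustration, SLAB_ATTR_RO(sanity_checks) above expands to roughly:
 *
 *   static struct slab_attribute sanity_checks_attr =
 *	   __ATTR(sanity_checks, 0400, sanity_checks_show, NULL);
 *
 * i.e. a 0400 (read-only) sysfs attribute backed by sanity_checks_show().
 */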
static ssize_t trace_show(struct kmem_cache *s, char *buf) 5240 { 5241 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5242 } 5243 SLAB_ATTR_RO(trace); 5244 5245 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5246 { 5247 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5248 } 5249 5250 SLAB_ATTR_RO(red_zone); 5251 5252 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5253 { 5254 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5255 } 5256 5257 SLAB_ATTR_RO(poison); 5258 5259 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5260 { 5261 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5262 } 5263 5264 SLAB_ATTR_RO(store_user); 5265 5266 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5267 { 5268 return 0; 5269 } 5270 5271 static ssize_t validate_store(struct kmem_cache *s, 5272 const char *buf, size_t length) 5273 { 5274 int ret = -EINVAL; 5275 5276 if (buf[0] == '1') { 5277 ret = validate_slab_cache(s); 5278 if (ret >= 0) 5279 ret = length; 5280 } 5281 return ret; 5282 } 5283 SLAB_ATTR(validate); 5284 5285 #endif /* CONFIG_SLUB_DEBUG */ 5286 5287 #ifdef CONFIG_FAILSLAB 5288 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5289 { 5290 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5291 } 5292 SLAB_ATTR_RO(failslab); 5293 #endif 5294 5295 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5296 { 5297 return 0; 5298 } 5299 5300 static ssize_t shrink_store(struct kmem_cache *s, 5301 const char *buf, size_t length) 5302 { 5303 if (buf[0] == '1') 5304 kmem_cache_shrink(s); 5305 else 5306 return -EINVAL; 5307 return length; 5308 } 5309 SLAB_ATTR(shrink); 5310 5311 #ifdef CONFIG_NUMA 5312 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5313 { 5314 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5315 } 5316 5317 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5318 const char *buf, size_t length) 5319 { 5320 unsigned int ratio; 5321 int err; 5322 5323 err = kstrtouint(buf, 10, &ratio); 5324 if (err) 5325 return err; 5326 if (ratio > 100) 5327 return -ERANGE; 5328 5329 s->remote_node_defrag_ratio = ratio * 10; 5330 5331 return length; 5332 } 5333 SLAB_ATTR(remote_node_defrag_ratio); 5334 #endif 5335 5336 #ifdef CONFIG_SLUB_STATS 5337 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5338 { 5339 unsigned long sum = 0; 5340 int cpu; 5341 int len = 0; 5342 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5343 5344 if (!data) 5345 return -ENOMEM; 5346 5347 for_each_online_cpu(cpu) { 5348 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5349 5350 data[cpu] = x; 5351 sum += x; 5352 } 5353 5354 len += sysfs_emit_at(buf, len, "%lu", sum); 5355 5356 #ifdef CONFIG_SMP 5357 for_each_online_cpu(cpu) { 5358 if (data[cpu]) 5359 len += sysfs_emit_at(buf, len, " C%d=%u", 5360 cpu, data[cpu]); 5361 } 5362 #endif 5363 kfree(data); 5364 len += sysfs_emit_at(buf, len, "\n"); 5365 5366 return len; 5367 } 5368 5369 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5370 { 5371 int cpu; 5372 5373 for_each_online_cpu(cpu) 5374 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5375 } 5376 5377 #define STAT_ATTR(si, text) \ 5378 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5379 { \ 5380 return show_stat(s, buf, si); \ 5381 } \ 5382 static ssize_t text##_store(struct kmem_cache *s, \ 5383 const char *buf, size_t length) \ 5384 { \ 5385 if (buf[0] != '0') 
\ 5386 return -EINVAL; \ 5387 clear_stat(s, si); \ 5388 return length; \ 5389 } \ 5390 SLAB_ATTR(text); \ 5391 5392 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5393 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5394 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5395 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5396 STAT_ATTR(FREE_FROZEN, free_frozen); 5397 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5398 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5399 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5400 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5401 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5402 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5403 STAT_ATTR(FREE_SLAB, free_slab); 5404 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5405 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5406 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5407 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5408 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5409 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5410 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5411 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5412 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5413 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5414 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5415 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5416 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5417 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5418 #endif /* CONFIG_SLUB_STATS */ 5419 5420 static struct attribute *slab_attrs[] = { 5421 &slab_size_attr.attr, 5422 &object_size_attr.attr, 5423 &objs_per_slab_attr.attr, 5424 &order_attr.attr, 5425 &min_partial_attr.attr, 5426 &cpu_partial_attr.attr, 5427 &objects_attr.attr, 5428 &objects_partial_attr.attr, 5429 &partial_attr.attr, 5430 &cpu_slabs_attr.attr, 5431 &ctor_attr.attr, 5432 &aliases_attr.attr, 5433 &align_attr.attr, 5434 &hwcache_align_attr.attr, 5435 &reclaim_account_attr.attr, 5436 &destroy_by_rcu_attr.attr, 5437 &shrink_attr.attr, 5438 &slabs_cpu_partial_attr.attr, 5439 #ifdef CONFIG_SLUB_DEBUG 5440 &total_objects_attr.attr, 5441 &slabs_attr.attr, 5442 &sanity_checks_attr.attr, 5443 &trace_attr.attr, 5444 &red_zone_attr.attr, 5445 &poison_attr.attr, 5446 &store_user_attr.attr, 5447 &validate_attr.attr, 5448 #endif 5449 #ifdef CONFIG_ZONE_DMA 5450 &cache_dma_attr.attr, 5451 #endif 5452 #ifdef CONFIG_NUMA 5453 &remote_node_defrag_ratio_attr.attr, 5454 #endif 5455 #ifdef CONFIG_SLUB_STATS 5456 &alloc_fastpath_attr.attr, 5457 &alloc_slowpath_attr.attr, 5458 &free_fastpath_attr.attr, 5459 &free_slowpath_attr.attr, 5460 &free_frozen_attr.attr, 5461 &free_add_partial_attr.attr, 5462 &free_remove_partial_attr.attr, 5463 &alloc_from_partial_attr.attr, 5464 &alloc_slab_attr.attr, 5465 &alloc_refill_attr.attr, 5466 &alloc_node_mismatch_attr.attr, 5467 &free_slab_attr.attr, 5468 &cpuslab_flush_attr.attr, 5469 &deactivate_full_attr.attr, 5470 &deactivate_empty_attr.attr, 5471 &deactivate_to_head_attr.attr, 5472 &deactivate_to_tail_attr.attr, 5473 &deactivate_remote_frees_attr.attr, 5474 &deactivate_bypass_attr.attr, 5475 &order_fallback_attr.attr, 5476 &cmpxchg_double_fail_attr.attr, 5477 &cmpxchg_double_cpu_fail_attr.attr, 5478 &cpu_partial_alloc_attr.attr, 5479 &cpu_partial_free_attr.attr, 5480 &cpu_partial_node_attr.attr, 5481 &cpu_partial_drain_attr.attr, 5482 #endif 5483 #ifdef CONFIG_FAILSLAB 5484 &failslab_attr.attr, 5485 #endif 5486 &usersize_attr.attr, 5487 5488 NULL 5489 }; 5490 5491 static const struct attribute_group slab_attr_group = { 5492 .attrs 
= slab_attrs, 5493 }; 5494 5495 static ssize_t slab_attr_show(struct kobject *kobj, 5496 struct attribute *attr, 5497 char *buf) 5498 { 5499 struct slab_attribute *attribute; 5500 struct kmem_cache *s; 5501 int err; 5502 5503 attribute = to_slab_attr(attr); 5504 s = to_slab(kobj); 5505 5506 if (!attribute->show) 5507 return -EIO; 5508 5509 err = attribute->show(s, buf); 5510 5511 return err; 5512 } 5513 5514 static ssize_t slab_attr_store(struct kobject *kobj, 5515 struct attribute *attr, 5516 const char *buf, size_t len) 5517 { 5518 struct slab_attribute *attribute; 5519 struct kmem_cache *s; 5520 int err; 5521 5522 attribute = to_slab_attr(attr); 5523 s = to_slab(kobj); 5524 5525 if (!attribute->store) 5526 return -EIO; 5527 5528 err = attribute->store(s, buf, len); 5529 return err; 5530 } 5531 5532 static void kmem_cache_release(struct kobject *k) 5533 { 5534 slab_kmem_cache_release(to_slab(k)); 5535 } 5536 5537 static const struct sysfs_ops slab_sysfs_ops = { 5538 .show = slab_attr_show, 5539 .store = slab_attr_store, 5540 }; 5541 5542 static struct kobj_type slab_ktype = { 5543 .sysfs_ops = &slab_sysfs_ops, 5544 .release = kmem_cache_release, 5545 }; 5546 5547 static struct kset *slab_kset; 5548 5549 static inline struct kset *cache_kset(struct kmem_cache *s) 5550 { 5551 return slab_kset; 5552 } 5553 5554 #define ID_STR_LENGTH 64 5555 5556 /* Create a unique string id for a slab cache: 5557 * 5558 * Format :[flags-]size 5559 */ 5560 static char *create_unique_id(struct kmem_cache *s) 5561 { 5562 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5563 char *p = name; 5564 5565 BUG_ON(!name); 5566 5567 *p++ = ':'; 5568 /* 5569 * First flags affecting slabcache operations. We will only 5570 * get here for aliasable slabs so we do not need to support 5571 * too many flags. The flags here must cover all flags that 5572 * are matched during merging to guarantee that the id is 5573 * unique. 5574 */ 5575 if (s->flags & SLAB_CACHE_DMA) 5576 *p++ = 'd'; 5577 if (s->flags & SLAB_CACHE_DMA32) 5578 *p++ = 'D'; 5579 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5580 *p++ = 'a'; 5581 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5582 *p++ = 'F'; 5583 if (s->flags & SLAB_ACCOUNT) 5584 *p++ = 'A'; 5585 if (p != name + 1) 5586 *p++ = '-'; 5587 p += sprintf(p, "%07u", s->size); 5588 5589 BUG_ON(p > name + ID_STR_LENGTH - 1); 5590 return name; 5591 } 5592 5593 static int sysfs_slab_add(struct kmem_cache *s) 5594 { 5595 int err; 5596 const char *name; 5597 struct kset *kset = cache_kset(s); 5598 int unmergeable = slab_unmergeable(s); 5599 5600 if (!kset) { 5601 kobject_init(&s->kobj, &slab_ktype); 5602 return 0; 5603 } 5604 5605 if (!unmergeable && disable_higher_order_debug && 5606 (slub_debug & DEBUG_METADATA_FLAGS)) 5607 unmergeable = 1; 5608 5609 if (unmergeable) { 5610 /* 5611 * Slabcache can never be merged so we can use the name proper. 5612 * This is typically the case for debug situations. In that 5613 * case we can catch duplicate names easily. 5614 */ 5615 sysfs_remove_link(&slab_kset->kobj, s->name); 5616 name = s->name; 5617 } else { 5618 /* 5619 * Create a unique name for the slab as a target 5620 * for the symlinks. 
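		 *
		 * For example, a mergeable SLAB_RECLAIM_ACCOUNT cache whose
		 * s->size works out to 256 bytes would get the id ":a-0000256"
		 * from create_unique_id() above (illustrative; the exact
		 * string depends on the flags set on the cache).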
5621 */ 5622 name = create_unique_id(s); 5623 } 5624 5625 s->kobj.kset = kset; 5626 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5627 if (err) 5628 goto out; 5629 5630 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5631 if (err) 5632 goto out_del_kobj; 5633 5634 if (!unmergeable) { 5635 /* Setup first alias */ 5636 sysfs_slab_alias(s, s->name); 5637 } 5638 out: 5639 if (!unmergeable) 5640 kfree(name); 5641 return err; 5642 out_del_kobj: 5643 kobject_del(&s->kobj); 5644 goto out; 5645 } 5646 5647 void sysfs_slab_unlink(struct kmem_cache *s) 5648 { 5649 if (slab_state >= FULL) 5650 kobject_del(&s->kobj); 5651 } 5652 5653 void sysfs_slab_release(struct kmem_cache *s) 5654 { 5655 if (slab_state >= FULL) 5656 kobject_put(&s->kobj); 5657 } 5658 5659 /* 5660 * Need to buffer aliases during bootup until sysfs becomes 5661 * available lest we lose that information. 5662 */ 5663 struct saved_alias { 5664 struct kmem_cache *s; 5665 const char *name; 5666 struct saved_alias *next; 5667 }; 5668 5669 static struct saved_alias *alias_list; 5670 5671 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5672 { 5673 struct saved_alias *al; 5674 5675 if (slab_state == FULL) { 5676 /* 5677 * If we have a leftover link then remove it. 5678 */ 5679 sysfs_remove_link(&slab_kset->kobj, name); 5680 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5681 } 5682 5683 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5684 if (!al) 5685 return -ENOMEM; 5686 5687 al->s = s; 5688 al->name = name; 5689 al->next = alias_list; 5690 alias_list = al; 5691 return 0; 5692 } 5693 5694 static int __init slab_sysfs_init(void) 5695 { 5696 struct kmem_cache *s; 5697 int err; 5698 5699 mutex_lock(&slab_mutex); 5700 5701 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5702 if (!slab_kset) { 5703 mutex_unlock(&slab_mutex); 5704 pr_err("Cannot register slab subsystem.\n"); 5705 return -ENOSYS; 5706 } 5707 5708 slab_state = FULL; 5709 5710 list_for_each_entry(s, &slab_caches, list) { 5711 err = sysfs_slab_add(s); 5712 if (err) 5713 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5714 s->name); 5715 } 5716 5717 while (alias_list) { 5718 struct saved_alias *al = alias_list; 5719 5720 alias_list = alias_list->next; 5721 err = sysfs_slab_alias(al->s, al->name); 5722 if (err) 5723 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5724 al->name); 5725 kfree(al); 5726 } 5727 5728 mutex_unlock(&slab_mutex); 5729 return 0; 5730 } 5731 5732 __initcall(slab_sysfs_init); 5733 #endif /* CONFIG_SYSFS */ 5734 5735 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 5736 static int slab_debugfs_show(struct seq_file *seq, void *v) 5737 { 5738 5739 struct location *l; 5740 unsigned int idx = *(unsigned int *)v; 5741 struct loc_track *t = seq->private; 5742 5743 if (idx < t->count) { 5744 l = &t->loc[idx]; 5745 5746 seq_printf(seq, "%7ld ", l->count); 5747 5748 if (l->addr) 5749 seq_printf(seq, "%pS", (void *)l->addr); 5750 else 5751 seq_puts(seq, "<not-available>"); 5752 5753 if (l->sum_time != l->min_time) { 5754 seq_printf(seq, " age=%ld/%llu/%ld", 5755 l->min_time, div_u64(l->sum_time, l->count), 5756 l->max_time); 5757 } else 5758 seq_printf(seq, " age=%ld", l->min_time); 5759 5760 if (l->min_pid != l->max_pid) 5761 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 5762 else 5763 seq_printf(seq, " pid=%ld", 5764 l->min_pid); 5765 5766 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 5767 seq_printf(seq, " cpus=%*pbl", 5768 
cpumask_pr_args(to_cpumask(l->cpus))); 5769 5770 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 5771 seq_printf(seq, " nodes=%*pbl", 5772 nodemask_pr_args(&l->nodes)); 5773 5774 seq_puts(seq, "\n"); 5775 } 5776 5777 if (!idx && !t->count) 5778 seq_puts(seq, "No data\n"); 5779 5780 return 0; 5781 } 5782 5783 static void slab_debugfs_stop(struct seq_file *seq, void *v) 5784 { 5785 } 5786 5787 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 5788 { 5789 struct loc_track *t = seq->private; 5790 5791 v = ppos; 5792 ++*ppos; 5793 if (*ppos <= t->count) 5794 return v; 5795 5796 return NULL; 5797 } 5798 5799 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 5800 { 5801 return ppos; 5802 } 5803 5804 static const struct seq_operations slab_debugfs_sops = { 5805 .start = slab_debugfs_start, 5806 .next = slab_debugfs_next, 5807 .stop = slab_debugfs_stop, 5808 .show = slab_debugfs_show, 5809 }; 5810 5811 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 5812 { 5813 5814 struct kmem_cache_node *n; 5815 enum track_item alloc; 5816 int node; 5817 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 5818 sizeof(struct loc_track)); 5819 struct kmem_cache *s = file_inode(filep)->i_private; 5820 5821 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 5822 alloc = TRACK_ALLOC; 5823 else 5824 alloc = TRACK_FREE; 5825 5826 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) 5827 return -ENOMEM; 5828 5829 /* Push back cpu slabs */ 5830 flush_all(s); 5831 5832 for_each_kmem_cache_node(s, node, n) { 5833 unsigned long flags; 5834 struct page *page; 5835 5836 if (!atomic_long_read(&n->nr_slabs)) 5837 continue; 5838 5839 spin_lock_irqsave(&n->list_lock, flags); 5840 list_for_each_entry(page, &n->partial, slab_list) 5841 process_slab(t, s, page, alloc); 5842 list_for_each_entry(page, &n->full, slab_list) 5843 process_slab(t, s, page, alloc); 5844 spin_unlock_irqrestore(&n->list_lock, flags); 5845 } 5846 5847 return 0; 5848 } 5849 5850 static int slab_debug_trace_release(struct inode *inode, struct file *file) 5851 { 5852 struct seq_file *seq = file->private_data; 5853 struct loc_track *t = seq->private; 5854 5855 free_loc_track(t); 5856 return seq_release_private(inode, file); 5857 } 5858 5859 static const struct file_operations slab_debugfs_fops = { 5860 .open = slab_debug_trace_open, 5861 .read = seq_read, 5862 .llseek = seq_lseek, 5863 .release = slab_debug_trace_release, 5864 }; 5865 5866 static void debugfs_slab_add(struct kmem_cache *s) 5867 { 5868 struct dentry *slab_cache_dir; 5869 5870 if (unlikely(!slab_debugfs_root)) 5871 return; 5872 5873 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 5874 5875 debugfs_create_file("alloc_traces", 0400, 5876 slab_cache_dir, s, &slab_debugfs_fops); 5877 5878 debugfs_create_file("free_traces", 0400, 5879 slab_cache_dir, s, &slab_debugfs_fops); 5880 } 5881 5882 void debugfs_slab_release(struct kmem_cache *s) 5883 { 5884 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); 5885 } 5886 5887 static int __init slab_debugfs_init(void) 5888 { 5889 struct kmem_cache *s; 5890 5891 slab_debugfs_root = debugfs_create_dir("slab", NULL); 5892 5893 list_for_each_entry(s, &slab_caches, list) 5894 if (s->flags & SLAB_STORE_USER) 5895 debugfs_slab_add(s); 5896 5897 return 0; 5898 5899 } 5900 __initcall(slab_debugfs_init); 5901 #endif 5902 /* 5903 * The /proc/slabinfo ABI 5904 */ 5905 #ifdef CONFIG_SLUB_DEBUG 5906 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 5907 { 5908 unsigned long nr_slabs = 0; 5909 unsigned long nr_objs = 0; 5910 unsigned long nr_free = 0; 5911 int node; 5912 struct kmem_cache_node *n; 5913 5914 for_each_kmem_cache_node(s, node, n) { 5915 nr_slabs += node_nr_slabs(n); 5916 nr_objs += node_nr_objs(n); 5917 nr_free += count_partial(n, count_free); 5918 } 5919 5920 sinfo->active_objs = nr_objs - nr_free; 5921 sinfo->num_objs = nr_objs; 5922 sinfo->active_slabs = nr_slabs; 5923 sinfo->num_slabs = nr_slabs; 5924 sinfo->objects_per_slab = oo_objects(s->oo); 5925 sinfo->cache_order = oo_order(s->oo); 5926 } 5927 5928 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 5929 { 5930 } 5931 5932 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 5933 size_t count, loff_t *ppos) 5934 { 5935 return -EIO; 5936 } 5937 #endif /* CONFIG_SLUB_DEBUG */ 5938
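/*
 * For reference, the numbers filled in by get_slabinfo() above feed the
 * /proc/slabinfo lines printed by the common slab code, which look roughly
 * like (values are illustrative only):
 *
 *   kmalloc-64  12800 12800   64   64    1 : tunables 0 0 0 : slabdata 200 200 0
 *
 * slabinfo_write() returns -EIO because SLUB has no SLAB-style tunables to
 * adjust through that interface.
 */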