// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except the per cpu partial list. The processor that froze
 *   the slab is the one who can perform list operations on the page. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to
 *   or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value
 *   that may be modified without taking the list lock.)
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. For
 *   example, allocating a long series of objects that fill up slabs does
 *   not require the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used.
 * If an object in a full slab is freed then the slab will show up again
 * on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * page->frozen		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}
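/*
 * Illustrative sketch (hypothetical values, not part of the original file):
 * with hardening enabled, the value stored in the object's freepointer slot
 * is
 *
 *	stored = fp ^ s->random ^ swab(ptr_addr)
 *
 * where ptr_addr is the address of the slot itself. Applying the same
 * transform to 'stored' recovers fp. Mixing in the byte-swapped slot
 * address places the secret's high (hard to guess) bits over the pointer's
 * predictable low bits, so leaked or corrupted freelist entries are not
 * directly usable as pointers.
 */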
/* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
			    (unsigned long)ptr_addr);
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	object = kasan_reset_tag(object);
	return freelist_dereference(s, object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
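/*
 * Conceptual sketch of the operation above (not part of the original file):
 * the double-word cmpxchg updates the adjacent page->freelist and
 * page->counters fields as one atomic unit, roughly:
 *
 *	atomically {
 *		if (page->freelist == freelist_old &&
 *		    page->counters == counters_old) {
 *			page->freelist = freelist_new;
 *			page->counters = counters_new;
 *		}
 *	}
 *
 * Since page->counters overlays the inuse/objects/frozen bitfields, a
 * freelist update and its bookkeeping either both happen or neither does,
 * without holding the slab lock.
 */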
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}

static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;
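/*
 * Usage sketch for get_map()/put_map() (hypothetical caller, not part of
 * the original file). A set bit marks an object that is currently on the
 * page's freelist, i.e. free:
 *
 *	unsigned long *map = get_map(s, page);	<- takes object_map_lock
 *	for_each_object(p, s, addr, page->objects)
 *		if (!test_bit(__obj_to_index(s, addr, p), map))
 *			...			<- p is an allocated object
 *	put_map(map);				<- releases object_map_lock
 */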
/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
		       16, 1, addr, length, 1);
	metadata_access_disable();
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		metadata_access_enable();
		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
					      TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;

		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_page_info(struct page *page)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
	       page, page->objects, page->inuse, page->freelist,
	       page->flags, &page->flags);
}
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, page, nextfree) && freelist) {
		object_err(s, page, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->object_size
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	object_size == inuse.
 *
 *	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
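/*
 * Worked example (hypothetical cache, not part of the original file): a
 * 64-bit cache with object_size = 24 created with red zoning, poisoning
 * and SLAB_STORE_USER might lay an object out as
 *
 *	[left red zone][24 object bytes][right red zone up to s->inuse]
 *	[free pointer][track(alloc)][track(free)][0x5a padding to s->size]
 *
 * A freed object holds 23 bytes of 0x6b followed by 0xa5; the red zones
 * hold 0xbb (free) or 0xcc (allocated). Exact offsets depend on alignment
 * and are computed in calculate_sizes(), so treat this only as a guide.
 */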
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = page_size(page);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page, void *object)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

/*
 * Parse a block of slub_debug options. Blocks are delimited by ';'
 *
 * @str: start of block
 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs: return start of list of slabs, or NULL when there's no list
 * @init: assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if their
			 * minimum order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			slub_debug = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with a list of
	 * slabs means debugging is only enabled for those slabs, so the global
	 * slub_debug should be 0. We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			slub_debug = 0;
		slub_debug_string = saved_str;
	}
out:
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slub_debug", setup_slub_debug);
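/*
 * Example invocations of the boot parameter parsed above (illustrative;
 * see Documentation/vm/slub.rst for the authoritative syntax):
 *
 *	slub_debug			full debugging for all caches
 *	slub_debug=P			poisoning only, all caches
 *	slub_debug=F,dentry		consistency checks for dentry alone
 *	slub_debug=FZ;P,kmalloc-*	two ';' blocks: checks + red zoning
 *					globally, plus poisoning for caches
 *					matching the "kmalloc-*" glob
 */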
/*
 * kmem_cache_flags - apply debugging options to the cache
 * @object_size:	the size of an object without meta data
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
static inline
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif /* CONFIG_SLUB_DEBUG */

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	return ptr;
}

static __always_inline void kfree_hook(void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x);
}

static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path. So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#ifdef CONFIG_LOCKDEP
	{
		unsigned long flags;

		local_irq_save(flags);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/* KASAN might put x into memory quarantine, delaying its reuse */
	return kasan_slab_free(s, x);
}

static inline bool slab_free_freelist_hook(struct kmem_cache *s,
					   void **head, void **tail)
{

	void *object;
	void *next = *head;
	void *old_tail = *tail ? *tail : *head;
	int rsize;

	if (is_kfence_address(next)) {
		slab_free_hook(s, next);
		return true;
	}

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		next = get_freepointer(s, object);

		if (slab_want_init_on_free(s)) {
			/*
			 * Clear the object and the metadata, but don't touch
			 * the redzone.
			 */
			memset(kasan_reset_tag(object), 0, s->object_size);
			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
							   : 0;
			memset((char *)kasan_reset_tag(object) + s->inuse, 0,
			       s->size - s->inuse - rsize);

		}
		/* If object's reuse doesn't have to be delayed */
		if (!slab_free_hook(s, object)) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		}
	} while (object != old_tail);

	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}

static void *setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
	return object;
}

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(struct kmem_cache *s,
		gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
	struct page *page;
	unsigned int order = oo_order(oo);

	if (node == NUMA_NO_NODE)
		page = alloc_pages(flags, order);
	else
		page = __alloc_pages_node(node, flags, order);

	return page;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		unsigned int i;

		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}
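/*
 * Worked example (hypothetical numbers, not part of the original file):
 * for a cache with s->size = 256 and four objects per slab,
 * cache_random_seq_create() might yield the index sequence {3, 0, 2, 1};
 * after the multiplication above, s->random_seq holds the byte offsets
 * {768, 0, 512, 256}, so shuffling below is a simple "start + offset"
 * lookup with no per-object multiply.
 */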
/* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	cur = setup_object(s, page, cur);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, page, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	page->objects = oo_objects(oo);

	account_slab_page(page, oo_order(oo), s, flags);

	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	kasan_poison_slab(page);

	start = page_address(page);

	setup_page_debug(s, page, start);

	shuffle = shuffle_freelist(s, page);

	if (!shuffle) {
		start = fixup_red_left(s, start);
		start = setup_object(s, page, start);
		page->freelist = start;
		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
			next = p + s->size;
			next = setup_object(s, page, next);
			set_freepointer(s, p, next);
			p = next;
		}
		set_freepointer(s, p, NULL);
	}

	page->inuse = page->objects;
	page->frozen = 1;

out:
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	return allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, SLUB_RED_INACTIVE);
	}

	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	/* In union with page->mapping where page allocator expects NULL */
	page->slab_cache = NULL;
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	unaccount_slab_page(page, order, s);
	__free_pages(page, order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page = container_of(h, struct page, rcu_head);

	__free_slab(page->slab_cache, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
		call_rcu(&page->rcu_head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}
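/*
 * Summary of the slab page life cycle implemented above and below
 * (paraphrase, not part of the original file):
 *
 *	new_slab() -> frozen cpu slab -> deactivate_slab() ->
 *	node partial list (or full list under debugging) ->
 *	empty -> discard_slab() -> back to the page allocator
 *
 * allocate_slab() starts every page frozen with inuse == objects, so the
 * owning cpu can hand out objects without touching the list_lock.
 */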
/*
 * Management of partially allocated slabs.
 */
static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->slab_list, &n->partial);
	else
		list_add(&page->slab_list, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, page, tail);
}

static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
	n->nr_partial--;
}

/*
 * Remove slab from the partial list, freeze it and
 * return the pointer to the freelist.
 *
 * Returns a list of objects or NULL if it fails.
 */
static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
		int mode, int *objects)
{
	void *freelist;
	unsigned long counters;
	struct page new;

	lockdep_assert_held(&n->list_lock);

	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
	 * per cpu allocation list.
	 */
	freelist = page->freelist;
	counters = page->counters;
	new.counters = counters;
	*objects = new.objects - new.inuse;
	if (mode) {
		new.inuse = page->objects;
		new.freelist = NULL;
	} else {
		new.freelist = freelist;
	}

	VM_BUG_ON(new.frozen);
	new.frozen = 1;

	if (!__cmpxchg_double_slab(s, page,
			freelist, counters,
			new.freelist, new.counters,
			"acquire_slab"))
		return NULL;

	remove_partial(n, page);
	WARN_ON(!freelist);
	return freelist;
}

static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);

/*
 * Try to allocate a partial slab from a specific node.
 */
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
				struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	unsigned int available = 0;
	int objects;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;

		if (!pfmemalloc_match(page, flags))
			continue;

		t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object) {
			c->page = page;
			stat(s, ALLOC_FROM_PARTIAL);
			object = t;
		} else {
			put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		if (!kmem_cache_has_cpu_partial(s)
			|| available > slub_cpu_partial(s) / 2)
			break;

	}
	spin_unlock(&n->list_lock);
	return object;
}
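/*
 * Illustration of acquire_slab() (hypothetical numbers, not part of the
 * original file): for a slab with objects = 8, inuse = 5 and mode != 0,
 * the attempted atomic transition is
 *
 *	(freelist = F, inuse = 5, frozen = 0)
 *		-> (freelist = NULL, inuse = 8, frozen = 1)
 *
 * and on success the 3-object chain F becomes the caller's cpu freelist.
 * If a concurrent free changed freelist/counters first, the cmpxchg fails
 * and the slab is left untouched on the partial list.
 */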
2019 */ 2020 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2021 struct kmem_cache_cpu *c) 2022 { 2023 #ifdef CONFIG_NUMA 2024 struct zonelist *zonelist; 2025 struct zoneref *z; 2026 struct zone *zone; 2027 enum zone_type highest_zoneidx = gfp_zone(flags); 2028 void *object; 2029 unsigned int cpuset_mems_cookie; 2030 2031 /* 2032 * The defrag ratio allows a configuration of the tradeoffs between 2033 * inter node defragmentation and node local allocations. A lower 2034 * defrag_ratio increases the tendency to do local allocations 2035 * instead of attempting to obtain partial slabs from other nodes. 2036 * 2037 * If the defrag_ratio is set to 0 then kmalloc() always 2038 * returns node local objects. If the ratio is higher then kmalloc() 2039 * may return off node objects because partial slabs are obtained 2040 * from other nodes and filled up. 2041 * 2042 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2043 * (which makes defrag_ratio = 1000) then every (well almost) 2044 * allocation will first attempt to defrag slab caches on other nodes. 2045 * This means scanning over all nodes to look for partial slabs which 2046 * may be expensive if we do it every time we are trying to find a slab 2047 * with available objects. 2048 */ 2049 if (!s->remote_node_defrag_ratio || 2050 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2051 return NULL; 2052 2053 do { 2054 cpuset_mems_cookie = read_mems_allowed_begin(); 2055 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2056 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2057 struct kmem_cache_node *n; 2058 2059 n = get_node(s, zone_to_nid(zone)); 2060 2061 if (n && cpuset_zone_allowed(zone, flags) && 2062 n->nr_partial > s->min_partial) { 2063 object = get_partial_node(s, n, c, flags); 2064 if (object) { 2065 /* 2066 * Don't check read_mems_allowed_retry() 2067 * here - if mems_allowed was updated in 2068 * parallel, that was a harmless race 2069 * between allocation and the cpuset 2070 * update 2071 */ 2072 return object; 2073 } 2074 } 2075 } 2076 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2077 #endif /* CONFIG_NUMA */ 2078 return NULL; 2079 } 2080 2081 /* 2082 * Get a partial page, lock it and return it. 2083 */ 2084 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2085 struct kmem_cache_cpu *c) 2086 { 2087 void *object; 2088 int searchnode = node; 2089 2090 if (node == NUMA_NO_NODE) 2091 searchnode = numa_mem_id(); 2092 2093 object = get_partial_node(s, get_node(s, searchnode), c, flags); 2094 if (object || node != NUMA_NO_NODE) 2095 return object; 2096 2097 return get_any_partial(s, flags, c); 2098 } 2099 2100 #ifdef CONFIG_PREEMPTION 2101 /* 2102 * Calculate the next globally unique transaction for disambiguation 2103 * during cmpxchg. The transactions start with the cpu number and are then 2104 * incremented by CONFIG_NR_CPUS. 2105 */ 2106 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2107 #else 2108 /* 2109 * No preemption supported therefore also no need to check for 2110 * different cpus. 
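 * The tid then advances by one per operation and is only ever compared
 * on the same cpu. With CONFIG_PREEMPTION, by contrast, e.g.
 * CONFIG_NR_CPUS=4 gives TID_STEP 4, so cpu 2's tids run 2, 6, 10, ...:
 * tid % TID_STEP recovers the cpu and tid / TID_STEP the event number
 * (see tid_to_cpu()/tid_to_event() below).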
2111 */ 2112 #define TID_STEP 1 2113 #endif 2114 2115 static inline unsigned long next_tid(unsigned long tid) 2116 { 2117 return tid + TID_STEP; 2118 } 2119 2120 #ifdef SLUB_DEBUG_CMPXCHG 2121 static inline unsigned int tid_to_cpu(unsigned long tid) 2122 { 2123 return tid % TID_STEP; 2124 } 2125 2126 static inline unsigned long tid_to_event(unsigned long tid) 2127 { 2128 return tid / TID_STEP; 2129 } 2130 #endif 2131 2132 static inline unsigned int init_tid(int cpu) 2133 { 2134 return cpu; 2135 } 2136 2137 static inline void note_cmpxchg_failure(const char *n, 2138 const struct kmem_cache *s, unsigned long tid) 2139 { 2140 #ifdef SLUB_DEBUG_CMPXCHG 2141 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2142 2143 pr_info("%s %s: cmpxchg redo ", n, s->name); 2144 2145 #ifdef CONFIG_PREEMPTION 2146 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2147 pr_warn("due to cpu change %d -> %d\n", 2148 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2149 else 2150 #endif 2151 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2152 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2153 tid_to_event(tid), tid_to_event(actual_tid)); 2154 else 2155 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2156 actual_tid, tid, next_tid(tid)); 2157 #endif 2158 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2159 } 2160 2161 static void init_kmem_cache_cpus(struct kmem_cache *s) 2162 { 2163 int cpu; 2164 2165 for_each_possible_cpu(cpu) 2166 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); 2167 } 2168 2169 /* 2170 * Remove the cpu slab 2171 */ 2172 static void deactivate_slab(struct kmem_cache *s, struct page *page, 2173 void *freelist, struct kmem_cache_cpu *c) 2174 { 2175 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 2176 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 2177 int lock = 0, free_delta = 0; 2178 enum slab_modes l = M_NONE, m = M_NONE; 2179 void *nextfree, *freelist_iter, *freelist_tail; 2180 int tail = DEACTIVATE_TO_HEAD; 2181 struct page new; 2182 struct page old; 2183 2184 if (page->freelist) { 2185 stat(s, DEACTIVATE_REMOTE_FREES); 2186 tail = DEACTIVATE_TO_TAIL; 2187 } 2188 2189 /* 2190 * Stage one: Count the objects on cpu's freelist as free_delta and 2191 * remember the last object in freelist_tail for later splicing. 2192 */ 2193 freelist_tail = NULL; 2194 freelist_iter = freelist; 2195 while (freelist_iter) { 2196 nextfree = get_freepointer(s, freelist_iter); 2197 2198 /* 2199 * If 'nextfree' is invalid, it is possible that the object at 2200 * 'freelist_iter' is already corrupted. So isolate all objects 2201 * starting at 'freelist_iter' by skipping them. 2202 */ 2203 if (freelist_corrupted(s, page, &freelist_iter, nextfree)) 2204 break; 2205 2206 freelist_tail = freelist_iter; 2207 free_delta++; 2208 2209 freelist_iter = nextfree; 2210 } 2211 2212 /* 2213 * Stage two: Unfreeze the page while splicing the per-cpu 2214 * freelist to the head of page's freelist. 2215 * 2216 * Ensure that the page is unfrozen while the list presence 2217 * reflects the actual number of objects during unfreeze. 2218 * 2219 * We setup the list membership and then perform a cmpxchg 2220 * with the count. If there is a mismatch then the page 2221 * is not unfrozen but the page is on the wrong list. 2222 * 2223 * Then we restart the process which may have to remove 2224 * the page from the list that we just put it on again 2225 * because the number of objects in the slab may have 2226 * changed. 
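         *
         * For example, if the cpu freelist holds A -> B -> C and the page
         * freelist holds D -> E, stage one found freelist_tail == C and
         * free_delta == 3; the splice below links C to D so the page
         * freelist becomes A -> B -> C -> D -> E and inuse drops by 3.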
2227 */ 2228 redo: 2229 2230 old.freelist = READ_ONCE(page->freelist); 2231 old.counters = READ_ONCE(page->counters); 2232 VM_BUG_ON(!old.frozen); 2233 2234 /* Determine target state of the slab */ 2235 new.counters = old.counters; 2236 if (freelist_tail) { 2237 new.inuse -= free_delta; 2238 set_freepointer(s, freelist_tail, old.freelist); 2239 new.freelist = freelist; 2240 } else 2241 new.freelist = old.freelist; 2242 2243 new.frozen = 0; 2244 2245 if (!new.inuse && n->nr_partial >= s->min_partial) 2246 m = M_FREE; 2247 else if (new.freelist) { 2248 m = M_PARTIAL; 2249 if (!lock) { 2250 lock = 1; 2251 /* 2252 * Taking the spinlock removes the possibility 2253 * that acquire_slab() will see a slab page that 2254 * is frozen 2255 */ 2256 spin_lock(&n->list_lock); 2257 } 2258 } else { 2259 m = M_FULL; 2260 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) { 2261 lock = 1; 2262 /* 2263 * This also ensures that the scanning of full 2264 * slabs from diagnostic functions will not see 2265 * any frozen slabs. 2266 */ 2267 spin_lock(&n->list_lock); 2268 } 2269 } 2270 2271 if (l != m) { 2272 if (l == M_PARTIAL) 2273 remove_partial(n, page); 2274 else if (l == M_FULL) 2275 remove_full(s, n, page); 2276 2277 if (m == M_PARTIAL) 2278 add_partial(n, page, tail); 2279 else if (m == M_FULL) 2280 add_full(s, n, page); 2281 } 2282 2283 l = m; 2284 if (!__cmpxchg_double_slab(s, page, 2285 old.freelist, old.counters, 2286 new.freelist, new.counters, 2287 "unfreezing slab")) 2288 goto redo; 2289 2290 if (lock) 2291 spin_unlock(&n->list_lock); 2292 2293 if (m == M_PARTIAL) 2294 stat(s, tail); 2295 else if (m == M_FULL) 2296 stat(s, DEACTIVATE_FULL); 2297 else if (m == M_FREE) { 2298 stat(s, DEACTIVATE_EMPTY); 2299 discard_slab(s, page); 2300 stat(s, FREE_SLAB); 2301 } 2302 2303 c->page = NULL; 2304 c->freelist = NULL; 2305 } 2306 2307 /* 2308 * Unfreeze all the cpu partial slabs. 2309 * 2310 * This function must be called with interrupts disabled 2311 * for the cpu using c (or some other guarantee must be there 2312 * to guarantee no concurrent accesses). 
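 *
 * Both callers in this file honour that: put_cpu_partial() wraps the
 * call in local_irq_save()/local_irq_restore(), and __flush_cpu_slab()
 * runs either from the flushing IPI or with interrupts disabled by
 * slub_cpu_dead().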
2313 */ 2314 static void unfreeze_partials(struct kmem_cache *s, 2315 struct kmem_cache_cpu *c) 2316 { 2317 #ifdef CONFIG_SLUB_CPU_PARTIAL 2318 struct kmem_cache_node *n = NULL, *n2 = NULL; 2319 struct page *page, *discard_page = NULL; 2320 2321 while ((page = slub_percpu_partial(c))) { 2322 struct page new; 2323 struct page old; 2324 2325 slub_set_percpu_partial(c, page); 2326 2327 n2 = get_node(s, page_to_nid(page)); 2328 if (n != n2) { 2329 if (n) 2330 spin_unlock(&n->list_lock); 2331 2332 n = n2; 2333 spin_lock(&n->list_lock); 2334 } 2335 2336 do { 2337 2338 old.freelist = page->freelist; 2339 old.counters = page->counters; 2340 VM_BUG_ON(!old.frozen); 2341 2342 new.counters = old.counters; 2343 new.freelist = old.freelist; 2344 2345 new.frozen = 0; 2346 2347 } while (!__cmpxchg_double_slab(s, page, 2348 old.freelist, old.counters, 2349 new.freelist, new.counters, 2350 "unfreezing slab")); 2351 2352 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2353 page->next = discard_page; 2354 discard_page = page; 2355 } else { 2356 add_partial(n, page, DEACTIVATE_TO_TAIL); 2357 stat(s, FREE_ADD_PARTIAL); 2358 } 2359 } 2360 2361 if (n) 2362 spin_unlock(&n->list_lock); 2363 2364 while (discard_page) { 2365 page = discard_page; 2366 discard_page = discard_page->next; 2367 2368 stat(s, DEACTIVATE_EMPTY); 2369 discard_slab(s, page); 2370 stat(s, FREE_SLAB); 2371 } 2372 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2373 } 2374 2375 /* 2376 * Put a page that was just frozen (in __slab_free|get_partial_node) into a 2377 * partial page slot if available. 2378 * 2379 * If we did not find a slot then simply move all the partials to the 2380 * per node partial list. 2381 */ 2382 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2383 { 2384 #ifdef CONFIG_SLUB_CPU_PARTIAL 2385 struct page *oldpage; 2386 int pages; 2387 int pobjects; 2388 2389 preempt_disable(); 2390 do { 2391 pages = 0; 2392 pobjects = 0; 2393 oldpage = this_cpu_read(s->cpu_slab->partial); 2394 2395 if (oldpage) { 2396 pobjects = oldpage->pobjects; 2397 pages = oldpage->pages; 2398 if (drain && pobjects > slub_cpu_partial(s)) { 2399 unsigned long flags; 2400 /* 2401 * partial array is full. Move the existing 2402 * set to the per node partial list. 2403 */ 2404 local_irq_save(flags); 2405 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2406 local_irq_restore(flags); 2407 oldpage = NULL; 2408 pobjects = 0; 2409 pages = 0; 2410 stat(s, CPU_PARTIAL_DRAIN); 2411 } 2412 } 2413 2414 pages++; 2415 pobjects += page->objects - page->inuse; 2416 2417 page->pages = pages; 2418 page->pobjects = pobjects; 2419 page->next = oldpage; 2420 2421 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) 2422 != oldpage); 2423 if (unlikely(!slub_cpu_partial(s))) { 2424 unsigned long flags; 2425 2426 local_irq_save(flags); 2427 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); 2428 local_irq_restore(flags); 2429 } 2430 preempt_enable(); 2431 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2432 } 2433 2434 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2435 { 2436 stat(s, CPUSLAB_FLUSH); 2437 deactivate_slab(s, c->page, c->freelist, c); 2438 2439 c->tid = next_tid(c->tid); 2440 } 2441 2442 /* 2443 * Flush cpu slab. 2444 * 2445 * Called from IPI handler with interrupts disabled. 
2446 */ 2447 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2448 { 2449 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2450 2451 if (c->page) 2452 flush_slab(s, c); 2453 2454 unfreeze_partials(s, c); 2455 } 2456 2457 static void flush_cpu_slab(void *d) 2458 { 2459 struct kmem_cache *s = d; 2460 2461 __flush_cpu_slab(s, smp_processor_id()); 2462 } 2463 2464 static bool has_cpu_slab(int cpu, void *info) 2465 { 2466 struct kmem_cache *s = info; 2467 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2468 2469 return c->page || slub_percpu_partial(c); 2470 } 2471 2472 static void flush_all(struct kmem_cache *s) 2473 { 2474 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1); 2475 } 2476 2477 /* 2478 * Use the cpu notifier to insure that the cpu slabs are flushed when 2479 * necessary. 2480 */ 2481 static int slub_cpu_dead(unsigned int cpu) 2482 { 2483 struct kmem_cache *s; 2484 unsigned long flags; 2485 2486 mutex_lock(&slab_mutex); 2487 list_for_each_entry(s, &slab_caches, list) { 2488 local_irq_save(flags); 2489 __flush_cpu_slab(s, cpu); 2490 local_irq_restore(flags); 2491 } 2492 mutex_unlock(&slab_mutex); 2493 return 0; 2494 } 2495 2496 /* 2497 * Check if the objects in a per cpu structure fit numa 2498 * locality expectations. 2499 */ 2500 static inline int node_match(struct page *page, int node) 2501 { 2502 #ifdef CONFIG_NUMA 2503 if (node != NUMA_NO_NODE && page_to_nid(page) != node) 2504 return 0; 2505 #endif 2506 return 1; 2507 } 2508 2509 #ifdef CONFIG_SLUB_DEBUG 2510 static int count_free(struct page *page) 2511 { 2512 return page->objects - page->inuse; 2513 } 2514 2515 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2516 { 2517 return atomic_long_read(&n->total_objects); 2518 } 2519 #endif /* CONFIG_SLUB_DEBUG */ 2520 2521 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2522 static unsigned long count_partial(struct kmem_cache_node *n, 2523 int (*get_count)(struct page *)) 2524 { 2525 unsigned long flags; 2526 unsigned long x = 0; 2527 struct page *page; 2528 2529 spin_lock_irqsave(&n->list_lock, flags); 2530 list_for_each_entry(page, &n->partial, slab_list) 2531 x += get_count(page); 2532 spin_unlock_irqrestore(&n->list_lock, flags); 2533 return x; 2534 } 2535 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2536 2537 static noinline void 2538 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2539 { 2540 #ifdef CONFIG_SLUB_DEBUG 2541 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2542 DEFAULT_RATELIMIT_BURST); 2543 int node; 2544 struct kmem_cache_node *n; 2545 2546 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2547 return; 2548 2549 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2550 nid, gfpflags, &gfpflags); 2551 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2552 s->name, s->object_size, s->size, oo_order(s->oo), 2553 oo_order(s->min)); 2554 2555 if (oo_order(s->min) > get_order(s->object_size)) 2556 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2557 s->name); 2558 2559 for_each_kmem_cache_node(s, node, n) { 2560 unsigned long nr_slabs; 2561 unsigned long nr_objs; 2562 unsigned long nr_free; 2563 2564 nr_free = count_partial(n, count_free); 2565 nr_slabs = node_nr_slabs(n); 2566 nr_objs = node_nr_objs(n); 2567 2568 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2569 node, nr_slabs, nr_objs, nr_free); 2570 } 2571 #endif 2572 } 2573 2574 static inline 
void *new_slab_objects(struct kmem_cache *s, gfp_t flags, 2575 int node, struct kmem_cache_cpu **pc) 2576 { 2577 void *freelist; 2578 struct kmem_cache_cpu *c = *pc; 2579 struct page *page; 2580 2581 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2582 2583 freelist = get_partial(s, flags, node, c); 2584 2585 if (freelist) 2586 return freelist; 2587 2588 page = new_slab(s, flags, node); 2589 if (page) { 2590 c = raw_cpu_ptr(s->cpu_slab); 2591 if (c->page) 2592 flush_slab(s, c); 2593 2594 /* 2595 * No other reference to the page yet so we can 2596 * muck around with it freely without cmpxchg 2597 */ 2598 freelist = page->freelist; 2599 page->freelist = NULL; 2600 2601 stat(s, ALLOC_SLAB); 2602 c->page = page; 2603 *pc = c; 2604 } 2605 2606 return freelist; 2607 } 2608 2609 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2610 { 2611 if (unlikely(PageSlabPfmemalloc(page))) 2612 return gfp_pfmemalloc_allowed(gfpflags); 2613 2614 return true; 2615 } 2616 2617 /* 2618 * Check the page->freelist of a page and either transfer the freelist to the 2619 * per cpu freelist or deactivate the page. 2620 * 2621 * The page is still frozen if the return value is not NULL. 2622 * 2623 * If this function returns NULL then the page has been unfrozen. 2624 * 2625 * This function must be called with interrupt disabled. 2626 */ 2627 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2628 { 2629 struct page new; 2630 unsigned long counters; 2631 void *freelist; 2632 2633 do { 2634 freelist = page->freelist; 2635 counters = page->counters; 2636 2637 new.counters = counters; 2638 VM_BUG_ON(!new.frozen); 2639 2640 new.inuse = page->objects; 2641 new.frozen = freelist != NULL; 2642 2643 } while (!__cmpxchg_double_slab(s, page, 2644 freelist, counters, 2645 NULL, new.counters, 2646 "get_freelist")); 2647 2648 return freelist; 2649 } 2650 2651 /* 2652 * Slow path. The lockless freelist is empty or we need to perform 2653 * debugging duties. 2654 * 2655 * Processing is still very fast if new objects have been freed to the 2656 * regular freelist. In that case we simply take over the regular freelist 2657 * as the lockless freelist and zap the regular freelist. 2658 * 2659 * If that is not working then we fall back to the partial lists. We take the 2660 * first element of the freelist as the object to allocate now and move the 2661 * rest of the freelist to the lockless freelist. 2662 * 2663 * And if we were unable to get a new slab from the partial slab lists then 2664 * we need to allocate a new slab. This is the slowest path since it involves 2665 * a call to the page allocator and the setup of a new slab. 2666 * 2667 * Version of __slab_alloc to use when we know that interrupts are 2668 * already disabled (which is the case for bulk allocation). 
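 *
 * A sketch of how the interrupt-safe wrapper below drives this function
 * (illustrative only; see __slab_alloc()):
 *
 *      local_irq_save(flags);
 *      c = this_cpu_ptr(s->cpu_slab);
 *      p = ___slab_alloc(s, gfpflags, node, addr, c);
 *      local_irq_restore(flags);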
2669 */ 2670 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2671 unsigned long addr, struct kmem_cache_cpu *c) 2672 { 2673 void *freelist; 2674 struct page *page; 2675 2676 stat(s, ALLOC_SLOWPATH); 2677 2678 page = c->page; 2679 if (!page) { 2680 /* 2681 * if the node is not online or has no normal memory, just 2682 * ignore the node constraint 2683 */ 2684 if (unlikely(node != NUMA_NO_NODE && 2685 !node_isset(node, slab_nodes))) 2686 node = NUMA_NO_NODE; 2687 goto new_slab; 2688 } 2689 redo: 2690 2691 if (unlikely(!node_match(page, node))) { 2692 /* 2693 * same as above but node_match() being false already 2694 * implies node != NUMA_NO_NODE 2695 */ 2696 if (!node_isset(node, slab_nodes)) { 2697 node = NUMA_NO_NODE; 2698 goto redo; 2699 } else { 2700 stat(s, ALLOC_NODE_MISMATCH); 2701 deactivate_slab(s, page, c->freelist, c); 2702 goto new_slab; 2703 } 2704 } 2705 2706 /* 2707 * By rights, we should be searching for a slab page that was 2708 * PFMEMALLOC but right now, we are losing the pfmemalloc 2709 * information when the page leaves the per-cpu allocator 2710 */ 2711 if (unlikely(!pfmemalloc_match(page, gfpflags))) { 2712 deactivate_slab(s, page, c->freelist, c); 2713 goto new_slab; 2714 } 2715 2716 /* must check again c->freelist in case of cpu migration or IRQ */ 2717 freelist = c->freelist; 2718 if (freelist) 2719 goto load_freelist; 2720 2721 freelist = get_freelist(s, page); 2722 2723 if (!freelist) { 2724 c->page = NULL; 2725 stat(s, DEACTIVATE_BYPASS); 2726 goto new_slab; 2727 } 2728 2729 stat(s, ALLOC_REFILL); 2730 2731 load_freelist: 2732 /* 2733 * freelist is pointing to the list of objects to be used. 2734 * page is pointing to the page from which the objects are obtained. 2735 * That page must be frozen for per cpu allocations to work. 2736 */ 2737 VM_BUG_ON(!c->page->frozen); 2738 c->freelist = get_freepointer(s, freelist); 2739 c->tid = next_tid(c->tid); 2740 return freelist; 2741 2742 new_slab: 2743 2744 if (slub_percpu_partial(c)) { 2745 page = c->page = slub_percpu_partial(c); 2746 slub_set_percpu_partial(c, page); 2747 stat(s, CPU_PARTIAL_ALLOC); 2748 goto redo; 2749 } 2750 2751 freelist = new_slab_objects(s, gfpflags, node, &c); 2752 2753 if (unlikely(!freelist)) { 2754 slab_out_of_memory(s, gfpflags, node); 2755 return NULL; 2756 } 2757 2758 page = c->page; 2759 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) 2760 goto load_freelist; 2761 2762 /* Only entered in the debug case */ 2763 if (kmem_cache_debug(s) && 2764 !alloc_debug_processing(s, page, freelist, addr)) 2765 goto new_slab; /* Slab failed checks. Next slab needed */ 2766 2767 deactivate_slab(s, page, get_freepointer(s, freelist), c); 2768 return freelist; 2769 } 2770 2771 /* 2772 * Another one that disabled interrupt and compensates for possible 2773 * cpu changes by refetching the per cpu area pointer. 2774 */ 2775 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2776 unsigned long addr, struct kmem_cache_cpu *c) 2777 { 2778 void *p; 2779 unsigned long flags; 2780 2781 local_irq_save(flags); 2782 #ifdef CONFIG_PREEMPTION 2783 /* 2784 * We may have been preempted and rescheduled on a different 2785 * cpu before disabling interrupts. Need to reload cpu area 2786 * pointer. 
2787 */ 2788 c = this_cpu_ptr(s->cpu_slab); 2789 #endif 2790 2791 p = ___slab_alloc(s, gfpflags, node, addr, c); 2792 local_irq_restore(flags); 2793 return p; 2794 } 2795 2796 /* 2797 * If the object has been wiped upon free, make sure it's fully initialized by 2798 * zeroing out freelist pointer. 2799 */ 2800 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 2801 void *obj) 2802 { 2803 if (unlikely(slab_want_init_on_free(s)) && obj) 2804 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 2805 0, sizeof(void *)); 2806 } 2807 2808 /* 2809 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2810 * have the fastpath folded into their functions. So no function call 2811 * overhead for requests that can be satisfied on the fastpath. 2812 * 2813 * The fastpath works by first checking if the lockless freelist can be used. 2814 * If not then __slab_alloc is called for slow processing. 2815 * 2816 * Otherwise we can simply pick the next object from the lockless free list. 2817 */ 2818 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2819 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 2820 { 2821 void *object; 2822 struct kmem_cache_cpu *c; 2823 struct page *page; 2824 unsigned long tid; 2825 struct obj_cgroup *objcg = NULL; 2826 2827 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); 2828 if (!s) 2829 return NULL; 2830 2831 object = kfence_alloc(s, orig_size, gfpflags); 2832 if (unlikely(object)) 2833 goto out; 2834 2835 redo: 2836 /* 2837 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 2838 * enabled. We may switch back and forth between cpus while 2839 * reading from one cpu area. That does not matter as long 2840 * as we end up on the original cpu again when doing the cmpxchg. 2841 * 2842 * We should guarantee that tid and kmem_cache are retrieved on 2843 * the same cpu. It could be different if CONFIG_PREEMPTION so we need 2844 * to check if it is matched or not. 2845 */ 2846 do { 2847 tid = this_cpu_read(s->cpu_slab->tid); 2848 c = raw_cpu_ptr(s->cpu_slab); 2849 } while (IS_ENABLED(CONFIG_PREEMPTION) && 2850 unlikely(tid != READ_ONCE(c->tid))); 2851 2852 /* 2853 * Irqless object alloc/free algorithm used here depends on sequence 2854 * of fetching cpu_slab's data. tid should be fetched before anything 2855 * on c to guarantee that object and page associated with previous tid 2856 * won't be used with current tid. If we fetch tid first, object and 2857 * page could be one associated with next tid and our alloc/free 2858 * request will be failed. In this case, we will retry. So, no problem. 2859 */ 2860 barrier(); 2861 2862 /* 2863 * The transaction ids are globally unique per cpu and per operation on 2864 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 2865 * occurs on the right processor and that there was no operation on the 2866 * linked list in between. 2867 */ 2868 2869 object = c->freelist; 2870 page = c->page; 2871 if (unlikely(!object || !page || !node_match(page, node))) { 2872 object = __slab_alloc(s, gfpflags, node, addr, c); 2873 } else { 2874 void *next_object = get_freepointer_safe(s, object); 2875 2876 /* 2877 * The cmpxchg will only match if there was no additional 2878 * operation and if we are on the right processor. 2879 * 2880 * The cmpxchg does the following atomically (without lock 2881 * semantics!) 2882 * 1. Relocate first pointer to the current per cpu area. 2883 * 2. Verify that tid and freelist have not been changed 2884 * 3. 
If they were not changed replace tid and freelist 2885 * 2886 * Since this is without lock semantics the protection is only 2887 * against code executing on this cpu *not* from access by 2888 * other cpus. 2889 */ 2890 if (unlikely(!this_cpu_cmpxchg_double( 2891 s->cpu_slab->freelist, s->cpu_slab->tid, 2892 object, tid, 2893 next_object, next_tid(tid)))) { 2894 2895 note_cmpxchg_failure("slab_alloc", s, tid); 2896 goto redo; 2897 } 2898 prefetch_freepointer(s, next_object); 2899 stat(s, ALLOC_FASTPATH); 2900 } 2901 2902 maybe_wipe_obj_freeptr(s, object); 2903 2904 if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) 2905 memset(kasan_reset_tag(object), 0, s->object_size); 2906 2907 out: 2908 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); 2909 2910 return object; 2911 } 2912 2913 static __always_inline void *slab_alloc(struct kmem_cache *s, 2914 gfp_t gfpflags, unsigned long addr, size_t orig_size) 2915 { 2916 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); 2917 } 2918 2919 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 2920 { 2921 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); 2922 2923 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 2924 s->size, gfpflags); 2925 2926 return ret; 2927 } 2928 EXPORT_SYMBOL(kmem_cache_alloc); 2929 2930 #ifdef CONFIG_TRACING 2931 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 2932 { 2933 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); 2934 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 2935 ret = kasan_kmalloc(s, ret, size, gfpflags); 2936 return ret; 2937 } 2938 EXPORT_SYMBOL(kmem_cache_alloc_trace); 2939 #endif 2940 2941 #ifdef CONFIG_NUMA 2942 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 2943 { 2944 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); 2945 2946 trace_kmem_cache_alloc_node(_RET_IP_, ret, 2947 s->object_size, s->size, gfpflags, node); 2948 2949 return ret; 2950 } 2951 EXPORT_SYMBOL(kmem_cache_alloc_node); 2952 2953 #ifdef CONFIG_TRACING 2954 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 2955 gfp_t gfpflags, 2956 int node, size_t size) 2957 { 2958 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); 2959 2960 trace_kmalloc_node(_RET_IP_, ret, 2961 size, s->size, gfpflags, node); 2962 2963 ret = kasan_kmalloc(s, ret, size, gfpflags); 2964 return ret; 2965 } 2966 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 2967 #endif 2968 #endif /* CONFIG_NUMA */ 2969 2970 /* 2971 * Slow path handling. This may still be called frequently since objects 2972 * have a longer lifetime than the cpu slabs in most processing loads. 2973 * 2974 * So we still attempt to reduce cache line usage. Just take the slab 2975 * lock and free the item. If there is no additional partial page 2976 * handling required then we can return immediately. 
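 *
 * Depending on the slab's state the free ends one of three ways: the
 * object is pushed onto the freelist of a frozen slab with no list
 * activity at all, a previously full slab is frozen or moved onto the
 * node partial list, or a now-empty slab is discarded entirely.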
2977 */ 2978 static void __slab_free(struct kmem_cache *s, struct page *page, 2979 void *head, void *tail, int cnt, 2980 unsigned long addr) 2981 2982 { 2983 void *prior; 2984 int was_frozen; 2985 struct page new; 2986 unsigned long counters; 2987 struct kmem_cache_node *n = NULL; 2988 unsigned long flags; 2989 2990 stat(s, FREE_SLOWPATH); 2991 2992 if (kfence_free(head)) 2993 return; 2994 2995 if (kmem_cache_debug(s) && 2996 !free_debug_processing(s, page, head, tail, cnt, addr)) 2997 return; 2998 2999 do { 3000 if (unlikely(n)) { 3001 spin_unlock_irqrestore(&n->list_lock, flags); 3002 n = NULL; 3003 } 3004 prior = page->freelist; 3005 counters = page->counters; 3006 set_freepointer(s, tail, prior); 3007 new.counters = counters; 3008 was_frozen = new.frozen; 3009 new.inuse -= cnt; 3010 if ((!new.inuse || !prior) && !was_frozen) { 3011 3012 if (kmem_cache_has_cpu_partial(s) && !prior) { 3013 3014 /* 3015 * Slab was on no list before and will be 3016 * partially empty 3017 * We can defer the list move and instead 3018 * freeze it. 3019 */ 3020 new.frozen = 1; 3021 3022 } else { /* Needs to be taken off a list */ 3023 3024 n = get_node(s, page_to_nid(page)); 3025 /* 3026 * Speculatively acquire the list_lock. 3027 * If the cmpxchg does not succeed then we may 3028 * drop the list_lock without any processing. 3029 * 3030 * Otherwise the list_lock will synchronize with 3031 * other processors updating the list of slabs. 3032 */ 3033 spin_lock_irqsave(&n->list_lock, flags); 3034 3035 } 3036 } 3037 3038 } while (!cmpxchg_double_slab(s, page, 3039 prior, counters, 3040 head, new.counters, 3041 "__slab_free")); 3042 3043 if (likely(!n)) { 3044 3045 if (likely(was_frozen)) { 3046 /* 3047 * The list lock was not taken therefore no list 3048 * activity can be necessary. 3049 */ 3050 stat(s, FREE_FROZEN); 3051 } else if (new.frozen) { 3052 /* 3053 * If we just froze the page then put it onto the 3054 * per cpu partial list. 3055 */ 3056 put_cpu_partial(s, page, 1); 3057 stat(s, CPU_PARTIAL_FREE); 3058 } 3059 3060 return; 3061 } 3062 3063 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3064 goto slab_empty; 3065 3066 /* 3067 * Objects left in the slab. If it was not on the partial list before 3068 * then add it. 3069 */ 3070 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3071 remove_full(s, n, page); 3072 add_partial(n, page, DEACTIVATE_TO_TAIL); 3073 stat(s, FREE_ADD_PARTIAL); 3074 } 3075 spin_unlock_irqrestore(&n->list_lock, flags); 3076 return; 3077 3078 slab_empty: 3079 if (prior) { 3080 /* 3081 * Slab on the partial list. 3082 */ 3083 remove_partial(n, page); 3084 stat(s, FREE_REMOVE_PARTIAL); 3085 } else { 3086 /* Slab must be on the full list */ 3087 remove_full(s, n, page); 3088 } 3089 3090 spin_unlock_irqrestore(&n->list_lock, flags); 3091 stat(s, FREE_SLAB); 3092 discard_slab(s, page); 3093 } 3094 3095 /* 3096 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3097 * can perform fastpath freeing without additional function calls. 3098 * 3099 * The fastpath is only possible if we are freeing to the current cpu slab 3100 * of this processor. This typically the case if we have just allocated 3101 * the item before. 3102 * 3103 * If fastpath is not possible then fall back to __slab_free where we deal 3104 * with all sorts of special processing. 3105 * 3106 * Bulk free of a freelist with several objects (all pointing to the 3107 * same page) possible by specifying head and tail ptr, plus objects 3108 * count (cnt). 
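 * Bulk free is indicated by the tail pointer being set.
 *
 * For example, freeing three objects A, B and C that live in the same
 * page passes head == A and tail == C, with the chain A -> B -> C
 * already linked through the objects' free pointers and cnt == 3;
 * build_detached_freelist() below constructs exactly such chains.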
 */
static __always_inline void do_slab_free(struct kmem_cache *s,
                                struct page *page, void *head, void *tail,
                                int cnt, unsigned long addr)
{
        void *tail_obj = tail ? : head;
        struct kmem_cache_cpu *c;
        unsigned long tid;

        memcg_slab_free_hook(s, &head, 1);
redo:
        /*
         * Determine the current cpu's per cpu slab.
         * The cpu may change afterward. However that does not matter since
         * data is retrieved via this pointer. If we are on the same cpu
         * during the cmpxchg then the free will succeed.
         */
        do {
                tid = this_cpu_read(s->cpu_slab->tid);
                c = raw_cpu_ptr(s->cpu_slab);
        } while (IS_ENABLED(CONFIG_PREEMPTION) &&
                 unlikely(tid != READ_ONCE(c->tid)));

        /* Same as the comment on barrier() in slab_alloc_node() */
        barrier();

        if (likely(page == c->page)) {
                void **freelist = READ_ONCE(c->freelist);

                set_freepointer(s, tail_obj, freelist);

                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                freelist, tid,
                                head, next_tid(tid)))) {

                        note_cmpxchg_failure("slab_free", s, tid);
                        goto redo;
                }
                stat(s, FREE_FASTPATH);
        } else
                __slab_free(s, page, head, tail_obj, cnt, addr);

}

static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
                                      void *head, void *tail, int cnt,
                                      unsigned long addr)
{
        /*
         * With KASAN enabled slab_free_freelist_hook modifies the freelist
         * to remove objects whose reuse must be delayed.
         */
        if (slab_free_freelist_hook(s, &head, &tail))
                do_slab_free(s, page, head, tail, cnt, addr);
}

#ifdef CONFIG_KASAN_GENERIC
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
        do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
}
#endif

void kmem_cache_free(struct kmem_cache *s, void *x)
{
        s = cache_from_obj(s, x);
        if (!s)
                return;
        slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
        trace_kmem_cache_free(_RET_IP_, x, s->name);
}
EXPORT_SYMBOL(kmem_cache_free);

struct detached_freelist {
        struct page *page;
        void *tail;
        void *freelist;
        int cnt;
        struct kmem_cache *s;
};

/*
 * This function progressively scans the array of free objects (with
 * a limited look ahead) and extracts objects belonging to the same
 * page. It builds a detached freelist directly within the given
 * page/objects. This can happen without any need for synchronization,
 * because the objects are owned by the running process. The freelist
 * is built up as a singly linked list in the objects. The idea is
 * that this detached freelist can then be bulk transferred to the
 * real freelist(s), requiring only a single synchronization
 * primitive. Look ahead in the array is limited for performance
 * reasons.
 */
static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
                            void **p, struct detached_freelist *df)
{
        size_t first_skipped_index = 0;
        int lookahead = 3;
        void *object;
        struct page *page;

        /* Always re-init detached_freelist */
        df->page = NULL;

        do {
                object = p[--size];
                /* Do we need !ZERO_OR_NULL_PTR(object) here?
(for kfree) */ 3218 } while (!object && size); 3219 3220 if (!object) 3221 return 0; 3222 3223 page = virt_to_head_page(object); 3224 if (!s) { 3225 /* Handle kalloc'ed objects */ 3226 if (unlikely(!PageSlab(page))) { 3227 BUG_ON(!PageCompound(page)); 3228 kfree_hook(object); 3229 __free_pages(page, compound_order(page)); 3230 p[size] = NULL; /* mark object processed */ 3231 return size; 3232 } 3233 /* Derive kmem_cache from object */ 3234 df->s = page->slab_cache; 3235 } else { 3236 df->s = cache_from_obj(s, object); /* Support for memcg */ 3237 } 3238 3239 if (is_kfence_address(object)) { 3240 slab_free_hook(df->s, object); 3241 __kfence_free(object); 3242 p[size] = NULL; /* mark object processed */ 3243 return size; 3244 } 3245 3246 /* Start new detached freelist */ 3247 df->page = page; 3248 set_freepointer(df->s, object, NULL); 3249 df->tail = object; 3250 df->freelist = object; 3251 p[size] = NULL; /* mark object processed */ 3252 df->cnt = 1; 3253 3254 while (size) { 3255 object = p[--size]; 3256 if (!object) 3257 continue; /* Skip processed objects */ 3258 3259 /* df->page is always set at this point */ 3260 if (df->page == virt_to_head_page(object)) { 3261 /* Opportunity build freelist */ 3262 set_freepointer(df->s, object, df->freelist); 3263 df->freelist = object; 3264 df->cnt++; 3265 p[size] = NULL; /* mark object processed */ 3266 3267 continue; 3268 } 3269 3270 /* Limit look ahead search */ 3271 if (!--lookahead) 3272 break; 3273 3274 if (!first_skipped_index) 3275 first_skipped_index = size + 1; 3276 } 3277 3278 return first_skipped_index; 3279 } 3280 3281 /* Note that interrupts must be enabled when calling this function. */ 3282 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3283 { 3284 if (WARN_ON(!size)) 3285 return; 3286 3287 memcg_slab_free_hook(s, p, size); 3288 do { 3289 struct detached_freelist df; 3290 3291 size = build_detached_freelist(s, size, p, &df); 3292 if (!df.page) 3293 continue; 3294 3295 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 3296 } while (likely(size)); 3297 } 3298 EXPORT_SYMBOL(kmem_cache_free_bulk); 3299 3300 /* Note that interrupts must be enabled when calling this function. */ 3301 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3302 void **p) 3303 { 3304 struct kmem_cache_cpu *c; 3305 int i; 3306 struct obj_cgroup *objcg = NULL; 3307 3308 /* memcg and kmem_cache debug support */ 3309 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3310 if (unlikely(!s)) 3311 return false; 3312 /* 3313 * Drain objects in the per cpu slab, while disabling local 3314 * IRQs, which protects against PREEMPT and interrupts 3315 * handlers invoking normal fastpath. 3316 */ 3317 local_irq_disable(); 3318 c = this_cpu_ptr(s->cpu_slab); 3319 3320 for (i = 0; i < size; i++) { 3321 void *object = kfence_alloc(s, s->object_size, flags); 3322 3323 if (unlikely(object)) { 3324 p[i] = object; 3325 continue; 3326 } 3327 3328 object = c->freelist; 3329 if (unlikely(!object)) { 3330 /* 3331 * We may have removed an object from c->freelist using 3332 * the fastpath in the previous iteration; in that case, 3333 * c->tid has not been bumped yet. 3334 * Since ___slab_alloc() may reenable interrupts while 3335 * allocating memory, we should bump c->tid now. 
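                         * Otherwise an irq-time free on this cpu could still
                         * satisfy the fastpath cmpxchg against the stale tid
                         * while we are manipulating c->freelist here.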
                         */
                        c->tid = next_tid(c->tid);

                        /*
                         * Invoking the slow path likely has the side effect
                         * of re-populating the per CPU c->freelist.
                         */
                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
                                            _RET_IP_, c);
                        if (unlikely(!p[i]))
                                goto error;

                        c = this_cpu_ptr(s->cpu_slab);
                        maybe_wipe_obj_freeptr(s, p[i]);

                        continue; /* goto for-loop */
                }
                c->freelist = get_freepointer(s, object);
                p[i] = object;
                maybe_wipe_obj_freeptr(s, p[i]);
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();

        /* Clear memory outside IRQ disabled fastpath loop */
        if (unlikely(slab_want_init_on_alloc(flags, s))) {
                int j;

                for (j = 0; j < i; j++)
                        memset(kasan_reset_tag(p[j]), 0, s->object_size);
        }

        /* memcg and kmem_cache debug support */
        slab_post_alloc_hook(s, objcg, flags, size, p);
        return i;
error:
        local_irq_enable();
        slab_post_alloc_hook(s, objcg, flags, i, p);
        __kmem_cache_free_bulk(s, i, p);
        return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);


/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static unsigned int slub_min_order;
static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static unsigned int slub_min_objects;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/16th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less of a concern for large slabs, though, which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline unsigned int slab_order(unsigned int size,
                unsigned int min_objects, unsigned int max_order,
                unsigned int fract_leftover)
{
        unsigned int min_order = slub_min_order;
        unsigned int order;

        if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
                return get_order(size * MAX_OBJS_PER_PAGE) - 1;

        for (order = max(min_order, (unsigned int)get_order(min_objects * size));
                        order <= max_order; order++) {

                unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
                unsigned int rem;

                rem = slab_size % size;

                if (rem <= slab_size / fract_leftover)
                        break;
        }

        return order;
}

static inline int calculate_order(unsigned int size)
{
        unsigned int order;
        unsigned int min_objects;
        unsigned int max_objects;
        unsigned int nr_cpus;

        /*
         * Attempt to find the best configuration for a slab. This
         * works by first attempting to generate a layout with
         * the best configuration and backing off gradually.
         *
         * First we increase the acceptable waste in a slab. Then
         * we reduce the minimum objects required in a slab.
         */
        min_objects = slub_min_objects;
        if (!min_objects) {
                /*
                 * Some architectures will only update present cpus when
                 * onlining them, so don't trust the number if it's just 1. But
                 * we also don't want to use nr_cpu_ids always, as on some other
                 * architectures, there can be many possible cpus, but never
                 * onlined. Here we compromise between trying to avoid too high
                 * an order on systems that appear larger than they are, and too
                 * low an order on systems that appear smaller than they are.
                 */
                nr_cpus = num_present_cpus();
                if (nr_cpus <= 1)
                        nr_cpus = nr_cpu_ids;
                min_objects = 4 * (fls(nr_cpus) + 1);
        }
        max_objects = order_objects(slub_max_order, size);
        min_objects = min(min_objects, max_objects);

        while (min_objects > 1) {
                unsigned int fraction;

                fraction = 16;
                while (fraction >= 4) {
                        order = slab_order(size, min_objects,
                                        slub_max_order, fraction);
                        if (order <= slub_max_order)
                                return order;
                        fraction /= 2;
                }
                min_objects--;
        }

        /*
         * We were unable to place multiple objects in a slab. Now
         * let's see if we can place a single object there.
         */
        order = slab_order(size, 1, slub_max_order, 1);
        if (order <= slub_max_order)
                return order;

        /*
         * This slab cannot be placed using slub_max_order.
         */
        order = slab_order(size, 1, MAX_ORDER, 1);
        if (order < MAX_ORDER)
                return order;
        return -ENOSYS;
}

static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_set(&n->nr_slabs, 0);
        atomic_long_set(&n->total_objects, 0);
        INIT_LIST_HEAD(&n->full);
#endif
}

static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
                        KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));

        /*
         * Must align to double word boundary for the double cmpxchg
         * instructions to work; see __pcpu_double_call_return_bool().
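         *
         * On x86-64, for instance, cmpxchg16b requires its operand to be
         * 16-byte aligned, hence the 2 * sizeof(void *) alignment of the
         * adjacent freelist/tid pair in struct kmem_cache_cpu.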
3539 */ 3540 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3541 2 * sizeof(void *)); 3542 3543 if (!s->cpu_slab) 3544 return 0; 3545 3546 init_kmem_cache_cpus(s); 3547 3548 return 1; 3549 } 3550 3551 static struct kmem_cache *kmem_cache_node; 3552 3553 /* 3554 * No kmalloc_node yet so do it by hand. We know that this is the first 3555 * slab on the node for this slabcache. There are no concurrent accesses 3556 * possible. 3557 * 3558 * Note that this function only works on the kmem_cache_node 3559 * when allocating for the kmem_cache_node. This is used for bootstrapping 3560 * memory on a fresh node that has no slab structures yet. 3561 */ 3562 static void early_kmem_cache_node_alloc(int node) 3563 { 3564 struct page *page; 3565 struct kmem_cache_node *n; 3566 3567 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3568 3569 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3570 3571 BUG_ON(!page); 3572 if (page_to_nid(page) != node) { 3573 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3574 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3575 } 3576 3577 n = page->freelist; 3578 BUG_ON(!n); 3579 #ifdef CONFIG_SLUB_DEBUG 3580 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3581 init_tracking(kmem_cache_node, n); 3582 #endif 3583 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL); 3584 page->freelist = get_freepointer(kmem_cache_node, n); 3585 page->inuse = 1; 3586 page->frozen = 0; 3587 kmem_cache_node->node[node] = n; 3588 init_kmem_cache_node(n); 3589 inc_slabs_node(kmem_cache_node, node, page->objects); 3590 3591 /* 3592 * No locks need to be taken here as it has just been 3593 * initialized and there is no concurrent access. 3594 */ 3595 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3596 } 3597 3598 static void free_kmem_cache_nodes(struct kmem_cache *s) 3599 { 3600 int node; 3601 struct kmem_cache_node *n; 3602 3603 for_each_kmem_cache_node(s, node, n) { 3604 s->node[node] = NULL; 3605 kmem_cache_free(kmem_cache_node, n); 3606 } 3607 } 3608 3609 void __kmem_cache_release(struct kmem_cache *s) 3610 { 3611 cache_random_seq_destroy(s); 3612 free_percpu(s->cpu_slab); 3613 free_kmem_cache_nodes(s); 3614 } 3615 3616 static int init_kmem_cache_nodes(struct kmem_cache *s) 3617 { 3618 int node; 3619 3620 for_each_node_mask(node, slab_nodes) { 3621 struct kmem_cache_node *n; 3622 3623 if (slab_state == DOWN) { 3624 early_kmem_cache_node_alloc(node); 3625 continue; 3626 } 3627 n = kmem_cache_alloc_node(kmem_cache_node, 3628 GFP_KERNEL, node); 3629 3630 if (!n) { 3631 free_kmem_cache_nodes(s); 3632 return 0; 3633 } 3634 3635 init_kmem_cache_node(n); 3636 s->node[node] = n; 3637 } 3638 return 1; 3639 } 3640 3641 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3642 { 3643 if (min < MIN_PARTIAL) 3644 min = MIN_PARTIAL; 3645 else if (min > MAX_PARTIAL) 3646 min = MAX_PARTIAL; 3647 s->min_partial = min; 3648 } 3649 3650 static void set_cpu_partial(struct kmem_cache *s) 3651 { 3652 #ifdef CONFIG_SLUB_CPU_PARTIAL 3653 /* 3654 * cpu_partial determined the maximum number of objects kept in the 3655 * per cpu partial lists of a processor. 3656 * 3657 * Per cpu partial lists mainly contain slabs that just have one 3658 * object freed. If they are used for allocation then they can be 3659 * filled up again with minimal effort. The slab will never hit the 3660 * per node partial lists and therefore no locking will be required. 
3661 * 3662 * This setting also determines 3663 * 3664 * A) The number of objects from per cpu partial slabs dumped to the 3665 * per node list when we reach the limit. 3666 * B) The number of objects in cpu partial slabs to extract from the 3667 * per node list when we run out of per cpu objects. We only fetch 3668 * 50% to keep some capacity around for frees. 3669 */ 3670 if (!kmem_cache_has_cpu_partial(s)) 3671 slub_set_cpu_partial(s, 0); 3672 else if (s->size >= PAGE_SIZE) 3673 slub_set_cpu_partial(s, 2); 3674 else if (s->size >= 1024) 3675 slub_set_cpu_partial(s, 6); 3676 else if (s->size >= 256) 3677 slub_set_cpu_partial(s, 13); 3678 else 3679 slub_set_cpu_partial(s, 30); 3680 #endif 3681 } 3682 3683 /* 3684 * calculate_sizes() determines the order and the distribution of data within 3685 * a slab object. 3686 */ 3687 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3688 { 3689 slab_flags_t flags = s->flags; 3690 unsigned int size = s->object_size; 3691 unsigned int freepointer_area; 3692 unsigned int order; 3693 3694 /* 3695 * Round up object size to the next word boundary. We can only 3696 * place the free pointer at word boundaries and this determines 3697 * the possible location of the free pointer. 3698 */ 3699 size = ALIGN(size, sizeof(void *)); 3700 /* 3701 * This is the area of the object where a freepointer can be 3702 * safely written. If redzoning adds more to the inuse size, we 3703 * can't use that portion for writing the freepointer, so 3704 * s->offset must be limited within this for the general case. 3705 */ 3706 freepointer_area = size; 3707 3708 #ifdef CONFIG_SLUB_DEBUG 3709 /* 3710 * Determine if we can poison the object itself. If the user of 3711 * the slab may touch the object after free or before allocation 3712 * then we should never poison the object itself. 3713 */ 3714 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 3715 !s->ctor) 3716 s->flags |= __OBJECT_POISON; 3717 else 3718 s->flags &= ~__OBJECT_POISON; 3719 3720 3721 /* 3722 * If we are Redzoning then check if there is some space between the 3723 * end of the object and the free pointer. If not then add an 3724 * additional word to have some bytes to store Redzone information. 3725 */ 3726 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 3727 size += sizeof(void *); 3728 #endif 3729 3730 /* 3731 * With that we have determined the number of bytes in actual use 3732 * by the object. This is the potential offset to the free pointer. 3733 */ 3734 s->inuse = size; 3735 3736 if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 3737 s->ctor)) { 3738 /* 3739 * Relocate free pointer after the object if it is not 3740 * permitted to overwrite the first word of the object on 3741 * kmem_cache_free. 3742 * 3743 * This is the case if we do RCU, have a constructor or 3744 * destructor or are poisoning the objects. 3745 * 3746 * The assumption that s->offset >= s->inuse means free 3747 * pointer is outside of the object is used in the 3748 * freeptr_outside_object() function. If that is no 3749 * longer true, the function needs to be modified. 3750 */ 3751 s->offset = size; 3752 size += sizeof(void *); 3753 } else if (freepointer_area > sizeof(void *)) { 3754 /* 3755 * Store freelist pointer near middle of object to keep 3756 * it away from the edges of the object to avoid small 3757 * sized over/underflows from neighboring allocations. 
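                 *
                 * For example, a 192-byte object with freepointer_area == 192
                 * ends up with s->offset == ALIGN(192 / 2, 8) == 96 on a
                 * 64-bit kernel, well clear of both edges.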
3758 */ 3759 s->offset = ALIGN(freepointer_area / 2, sizeof(void *)); 3760 } 3761 3762 #ifdef CONFIG_SLUB_DEBUG 3763 if (flags & SLAB_STORE_USER) 3764 /* 3765 * Need to store information about allocs and frees after 3766 * the object. 3767 */ 3768 size += 2 * sizeof(struct track); 3769 #endif 3770 3771 kasan_cache_create(s, &size, &s->flags); 3772 #ifdef CONFIG_SLUB_DEBUG 3773 if (flags & SLAB_RED_ZONE) { 3774 /* 3775 * Add some empty padding so that we can catch 3776 * overwrites from earlier objects rather than let 3777 * tracking information or the free pointer be 3778 * corrupted if a user writes before the start 3779 * of the object. 3780 */ 3781 size += sizeof(void *); 3782 3783 s->red_left_pad = sizeof(void *); 3784 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 3785 size += s->red_left_pad; 3786 } 3787 #endif 3788 3789 /* 3790 * SLUB stores one object immediately after another beginning from 3791 * offset 0. In order to align the objects we have to simply size 3792 * each object to conform to the alignment. 3793 */ 3794 size = ALIGN(size, s->align); 3795 s->size = size; 3796 s->reciprocal_size = reciprocal_value(size); 3797 if (forced_order >= 0) 3798 order = forced_order; 3799 else 3800 order = calculate_order(size); 3801 3802 if ((int)order < 0) 3803 return 0; 3804 3805 s->allocflags = 0; 3806 if (order) 3807 s->allocflags |= __GFP_COMP; 3808 3809 if (s->flags & SLAB_CACHE_DMA) 3810 s->allocflags |= GFP_DMA; 3811 3812 if (s->flags & SLAB_CACHE_DMA32) 3813 s->allocflags |= GFP_DMA32; 3814 3815 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3816 s->allocflags |= __GFP_RECLAIMABLE; 3817 3818 /* 3819 * Determine the number of objects per slab 3820 */ 3821 s->oo = oo_make(order, size); 3822 s->min = oo_make(get_order(size), size); 3823 if (oo_objects(s->oo) > oo_objects(s->max)) 3824 s->max = s->oo; 3825 3826 return !!oo_objects(s->oo); 3827 } 3828 3829 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3830 { 3831 s->flags = kmem_cache_flags(s->size, flags, s->name); 3832 #ifdef CONFIG_SLAB_FREELIST_HARDENED 3833 s->random = get_random_long(); 3834 #endif 3835 3836 if (!calculate_sizes(s, -1)) 3837 goto error; 3838 if (disable_higher_order_debug) { 3839 /* 3840 * Disable debugging flags that store metadata if the min slab 3841 * order increased. 3842 */ 3843 if (get_order(s->size) > get_order(s->object_size)) { 3844 s->flags &= ~DEBUG_METADATA_FLAGS; 3845 s->offset = 0; 3846 if (!calculate_sizes(s, -1)) 3847 goto error; 3848 } 3849 } 3850 3851 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 3852 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 3853 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 3854 /* Enable fast mode */ 3855 s->flags |= __CMPXCHG_DOUBLE; 3856 #endif 3857 3858 /* 3859 * The larger the object size is, the more pages we want on the partial 3860 * list to avoid pounding the page allocator excessively. 
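         *
         * For example, the ilog2() heuristic keeps between MIN_PARTIAL (5)
         * and MAX_PARTIAL (10) slabs around: a 4 KiB object size yields
         * ilog2(4096) / 2 == 6 partial slabs, while anything 1 KiB or
         * smaller clamps to MIN_PARTIAL in set_min_partial().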
3861 */ 3862 set_min_partial(s, ilog2(s->size) / 2); 3863 3864 set_cpu_partial(s); 3865 3866 #ifdef CONFIG_NUMA 3867 s->remote_node_defrag_ratio = 1000; 3868 #endif 3869 3870 /* Initialize the pre-computed randomized freelist if slab is up */ 3871 if (slab_state >= UP) { 3872 if (init_cache_random_seq(s)) 3873 goto error; 3874 } 3875 3876 if (!init_kmem_cache_nodes(s)) 3877 goto error; 3878 3879 if (alloc_kmem_cache_cpus(s)) 3880 return 0; 3881 3882 free_kmem_cache_nodes(s); 3883 error: 3884 return -EINVAL; 3885 } 3886 3887 static void list_slab_objects(struct kmem_cache *s, struct page *page, 3888 const char *text) 3889 { 3890 #ifdef CONFIG_SLUB_DEBUG 3891 void *addr = page_address(page); 3892 unsigned long *map; 3893 void *p; 3894 3895 slab_err(s, page, text, s->name); 3896 slab_lock(page); 3897 3898 map = get_map(s, page); 3899 for_each_object(p, s, addr, page->objects) { 3900 3901 if (!test_bit(__obj_to_index(s, addr, p), map)) { 3902 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 3903 print_tracking(s, p); 3904 } 3905 } 3906 put_map(map); 3907 slab_unlock(page); 3908 #endif 3909 } 3910 3911 /* 3912 * Attempt to free all partial slabs on a node. 3913 * This is called from __kmem_cache_shutdown(). We must take list_lock 3914 * because sysfs file might still access partial list after the shutdowning. 3915 */ 3916 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3917 { 3918 LIST_HEAD(discard); 3919 struct page *page, *h; 3920 3921 BUG_ON(irqs_disabled()); 3922 spin_lock_irq(&n->list_lock); 3923 list_for_each_entry_safe(page, h, &n->partial, slab_list) { 3924 if (!page->inuse) { 3925 remove_partial(n, page); 3926 list_add(&page->slab_list, &discard); 3927 } else { 3928 list_slab_objects(s, page, 3929 "Objects remaining in %s on __kmem_cache_shutdown()"); 3930 } 3931 } 3932 spin_unlock_irq(&n->list_lock); 3933 3934 list_for_each_entry_safe(page, h, &discard, slab_list) 3935 discard_slab(s, page); 3936 } 3937 3938 bool __kmem_cache_empty(struct kmem_cache *s) 3939 { 3940 int node; 3941 struct kmem_cache_node *n; 3942 3943 for_each_kmem_cache_node(s, node, n) 3944 if (n->nr_partial || slabs_node(s, node)) 3945 return false; 3946 return true; 3947 } 3948 3949 /* 3950 * Release all resources used by a slab cache. 
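 *
 * Called via kmem_cache_destroy() once no other users of the cache
 * remain; a nonzero return means objects are still in use and the
 * cache cannot be torn down.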
3951 */ 3952 int __kmem_cache_shutdown(struct kmem_cache *s) 3953 { 3954 int node; 3955 struct kmem_cache_node *n; 3956 3957 flush_all(s); 3958 /* Attempt to free all objects */ 3959 for_each_kmem_cache_node(s, node, n) { 3960 free_partial(s, n); 3961 if (n->nr_partial || slabs_node(s, node)) 3962 return 1; 3963 } 3964 return 0; 3965 } 3966 3967 #ifdef CONFIG_PRINTK 3968 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) 3969 { 3970 void *base; 3971 int __maybe_unused i; 3972 unsigned int objnr; 3973 void *objp; 3974 void *objp0; 3975 struct kmem_cache *s = page->slab_cache; 3976 struct track __maybe_unused *trackp; 3977 3978 kpp->kp_ptr = object; 3979 kpp->kp_page = page; 3980 kpp->kp_slab_cache = s; 3981 base = page_address(page); 3982 objp0 = kasan_reset_tag(object); 3983 #ifdef CONFIG_SLUB_DEBUG 3984 objp = restore_red_left(s, objp0); 3985 #else 3986 objp = objp0; 3987 #endif 3988 objnr = obj_to_index(s, page, objp); 3989 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 3990 objp = base + s->size * objnr; 3991 kpp->kp_objp = objp; 3992 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || 3993 !(s->flags & SLAB_STORE_USER)) 3994 return; 3995 #ifdef CONFIG_SLUB_DEBUG 3996 trackp = get_track(s, objp, TRACK_ALLOC); 3997 kpp->kp_ret = (void *)trackp->addr; 3998 #ifdef CONFIG_STACKTRACE 3999 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { 4000 kpp->kp_stack[i] = (void *)trackp->addrs[i]; 4001 if (!kpp->kp_stack[i]) 4002 break; 4003 } 4004 #endif 4005 #endif 4006 } 4007 #endif 4008 4009 /******************************************************************** 4010 * Kmalloc subsystem 4011 *******************************************************************/ 4012 4013 static int __init setup_slub_min_order(char *str) 4014 { 4015 get_option(&str, (int *)&slub_min_order); 4016 4017 return 1; 4018 } 4019 4020 __setup("slub_min_order=", setup_slub_min_order); 4021 4022 static int __init setup_slub_max_order(char *str) 4023 { 4024 get_option(&str, (int *)&slub_max_order); 4025 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4026 4027 return 1; 4028 } 4029 4030 __setup("slub_max_order=", setup_slub_max_order); 4031 4032 static int __init setup_slub_min_objects(char *str) 4033 { 4034 get_option(&str, (int *)&slub_min_objects); 4035 4036 return 1; 4037 } 4038 4039 __setup("slub_min_objects=", setup_slub_min_objects); 4040 4041 void *__kmalloc(size_t size, gfp_t flags) 4042 { 4043 struct kmem_cache *s; 4044 void *ret; 4045 4046 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4047 return kmalloc_large(size, flags); 4048 4049 s = kmalloc_slab(size, flags); 4050 4051 if (unlikely(ZERO_OR_NULL_PTR(s))) 4052 return s; 4053 4054 ret = slab_alloc(s, flags, _RET_IP_, size); 4055 4056 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 4057 4058 ret = kasan_kmalloc(s, ret, size, flags); 4059 4060 return ret; 4061 } 4062 EXPORT_SYMBOL(__kmalloc); 4063 4064 #ifdef CONFIG_NUMA 4065 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 4066 { 4067 struct page *page; 4068 void *ptr = NULL; 4069 unsigned int order = get_order(size); 4070 4071 flags |= __GFP_COMP; 4072 page = alloc_pages_node(node, flags, order); 4073 if (page) { 4074 ptr = page_address(page); 4075 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4076 PAGE_SIZE << order); 4077 } 4078 4079 return kmalloc_large_node_hook(ptr, size, flags); 4080 } 4081 4082 void *__kmalloc_node(size_t size, gfp_t flags, int node) 
4083 {
4084 struct kmem_cache *s;
4085 void *ret;
4086
4087 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4088 ret = kmalloc_large_node(size, flags, node);
4089
4090 trace_kmalloc_node(_RET_IP_, ret,
4091 size, PAGE_SIZE << get_order(size),
4092 flags, node);
4093
4094 return ret;
4095 }
4096
4097 s = kmalloc_slab(size, flags);
4098
4099 if (unlikely(ZERO_OR_NULL_PTR(s)))
4100 return s;
4101
4102 ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
4103
4104 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4105
4106 ret = kasan_kmalloc(s, ret, size, flags);
4107
4108 return ret;
4109 }
4110 EXPORT_SYMBOL(__kmalloc_node);
4111 #endif /* CONFIG_NUMA */
4112
4113 #ifdef CONFIG_HARDENED_USERCOPY
4114 /*
4115 * Rejects incorrectly sized objects and objects that are to be copied
4116 * to/from userspace but do not fall entirely within the containing slab
4117 * cache's usercopy region.
4118 *
4119 * Returns normally if the check passes; otherwise the copy is aborted
4120 * via usercopy_abort(), which reports the name of the offending cache.
4121 */
4122 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4123 bool to_user)
4124 {
4125 struct kmem_cache *s;
4126 unsigned int offset;
4127 size_t object_size;
4128 bool is_kfence = is_kfence_address(ptr);
4129
4130 ptr = kasan_reset_tag(ptr);
4131
4132 /* Find object and usable object size. */
4133 s = page->slab_cache;
4134
4135 /* Reject impossible pointers. */
4136 if (ptr < page_address(page))
4137 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4138 to_user, 0, n);
4139
4140 /* Find offset within object. */
4141 if (is_kfence)
4142 offset = ptr - kfence_object_start(ptr);
4143 else
4144 offset = (ptr - page_address(page)) % s->size;
4145
4146 /* Adjust for redzone and reject if within the redzone. */
4147 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4148 if (offset < s->red_left_pad)
4149 usercopy_abort("SLUB object in left red zone",
4150 s->name, to_user, offset, n);
4151 offset -= s->red_left_pad;
4152 }
4153
4154 /* Allow address range falling entirely within usercopy region. */
4155 if (offset >= s->useroffset &&
4156 offset - s->useroffset <= s->usersize &&
4157 n <= s->useroffset - offset + s->usersize)
4158 return;
4159
4160 /*
4161 * If the copy is still within the allocated object, produce
4162 * a warning instead of rejecting the copy. This is intended
4163 * to be a temporary method to find any missing usercopy
4164 * whitelists.
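 * (usercopy_fallback below is typically defaulted from
 * CONFIG_HARDENED_USERCOPY_FALLBACK; when it is false, the copy is
 * rejected outright via usercopy_abort().)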
4165 */
4166 object_size = slab_ksize(s);
4167 if (usercopy_fallback &&
4168 offset <= object_size && n <= object_size - offset) {
4169 usercopy_warn("SLUB object", s->name, to_user, offset, n);
4170 return;
4171 }
4172
4173 usercopy_abort("SLUB object", s->name, to_user, offset, n);
4174 }
4175 #endif /* CONFIG_HARDENED_USERCOPY */
4176
4177 size_t __ksize(const void *object)
4178 {
4179 struct page *page;
4180
4181 if (unlikely(object == ZERO_SIZE_PTR))
4182 return 0;
4183
4184 page = virt_to_head_page(object);
4185
4186 if (unlikely(!PageSlab(page))) {
4187 WARN_ON(!PageCompound(page));
4188 return page_size(page);
4189 }
4190
4191 return slab_ksize(page->slab_cache);
4192 }
4193 EXPORT_SYMBOL(__ksize);
4194
4195 void kfree(const void *x)
4196 {
4197 struct page *page;
4198 void *object = (void *)x;
4199
4200 trace_kfree(_RET_IP_, x);
4201
4202 if (unlikely(ZERO_OR_NULL_PTR(x)))
4203 return;
4204
4205 page = virt_to_head_page(x);
4206 if (unlikely(!PageSlab(page))) {
4207 unsigned int order = compound_order(page);
4208
4209 BUG_ON(!PageCompound(page));
4210 kfree_hook(object);
4211 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4212 -(PAGE_SIZE << order));
4213 __free_pages(page, order);
4214 return;
4215 }
4216 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4217 }
4218 EXPORT_SYMBOL(kfree);
4219
4220 #define SHRINK_PROMOTE_MAX 32
4221
4222 /*
4223 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4224 * up most to the head of the partial lists. New allocations will then
4225 * fill those up and thus they can be removed from the partial lists.
4226 *
4227 * The slabs with the fewest objects in use are placed last, so they are
4228 * allocated from last; this increases the chance that their remaining
4229 * objects are freed and the slabs can eventually be discarded.
4230 */
4231 int __kmem_cache_shrink(struct kmem_cache *s)
4232 {
4233 int node;
4234 int i;
4235 struct kmem_cache_node *n;
4236 struct page *page;
4237 struct page *t;
4238 struct list_head discard;
4239 struct list_head promote[SHRINK_PROMOTE_MAX];
4240 unsigned long flags;
4241 int ret = 0;
4242
4243 flush_all(s);
4244 for_each_kmem_cache_node(s, node, n) {
4245 INIT_LIST_HEAD(&discard);
4246 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4247 INIT_LIST_HEAD(promote + i);
4248
4249 spin_lock_irqsave(&n->list_lock, flags);
4250
4251 /*
4252 * Build lists of slabs to discard or promote.
4253 *
4254 * Note that concurrent frees may occur while we hold the
4255 * list_lock. page->inuse here is the upper limit.
4256 */
4257 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4258 int free = page->objects - page->inuse;
4259
4260 /* Do not reread page->inuse */
4261 barrier();
4262
4263 /* We do not keep full slabs on the list */
4264 BUG_ON(free <= 0);
4265
4266 if (free == page->objects) {
4267 list_move(&page->slab_list, &discard);
4268 n->nr_partial--;
4269 } else if (free <= SHRINK_PROMOTE_MAX)
4270 list_move(&page->slab_list, promote + free - 1);
4271 }
4272
4273 /*
4274 * Promote the slabs filled up most to the head of the
4275 * partial list.
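 * Slabs with (i + 1) free objects were collected on promote[i] above,
 * so splicing from SHRINK_PROMOTE_MAX - 1 down to 0 leaves the fullest
 * slabs at the head of the list.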
4276 */
4277 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4278 list_splice(promote + i, &n->partial);
4279
4280 spin_unlock_irqrestore(&n->list_lock, flags);
4281
4282 /* Release empty slabs */
4283 list_for_each_entry_safe(page, t, &discard, slab_list)
4284 discard_slab(s, page);
4285
4286 if (slabs_node(s, node))
4287 ret = 1;
4288 }
4289
4290 return ret;
4291 }
4292
4293 static int slab_mem_going_offline_callback(void *arg)
4294 {
4295 struct kmem_cache *s;
4296
4297 mutex_lock(&slab_mutex);
4298 list_for_each_entry(s, &slab_caches, list)
4299 __kmem_cache_shrink(s);
4300 mutex_unlock(&slab_mutex);
4301
4302 return 0;
4303 }
4304
4305 static void slab_mem_offline_callback(void *arg)
4306 {
4307 struct memory_notify *marg = arg;
4308 int offline_node;
4309
4310 offline_node = marg->status_change_nid_normal;
4311
4312 /*
4313 * If the node still has available memory, we still need its
4314 * kmem_cache_node, so there is nothing to do.
4315 */
4316 if (offline_node < 0)
4317 return;
4318
4319 mutex_lock(&slab_mutex);
4320 node_clear(offline_node, slab_nodes);
4321 /*
4322 * We no longer free kmem_cache_node structures here, as it would be
4323 * racy with all get_node() users, and infeasible to protect them with
4324 * slab_mutex.
4325 */
4326 mutex_unlock(&slab_mutex);
4327 }
4328
4329 static int slab_mem_going_online_callback(void *arg)
4330 {
4331 struct kmem_cache_node *n;
4332 struct kmem_cache *s;
4333 struct memory_notify *marg = arg;
4334 int nid = marg->status_change_nid_normal;
4335 int ret = 0;
4336
4337 /*
4338 * If the node's memory is already available, then kmem_cache_node is
4339 * already created. Nothing to do.
4340 */
4341 if (nid < 0)
4342 return 0;
4343
4344 /*
4345 * We are bringing a node online. No memory is available yet. We must
4346 * allocate a kmem_cache_node structure in order to bring the node
4347 * online.
4348 */
4349 mutex_lock(&slab_mutex);
4350 list_for_each_entry(s, &slab_caches, list) {
4351 /*
4352 * The structure may already exist if the node was previously
4353 * onlined and offlined.
4354 */
4355 if (get_node(s, nid))
4356 continue;
4357 /*
4358 * XXX: kmem_cache_alloc_node will fall back to other nodes
4359 * since memory is not yet available from the node that
4360 * is brought up.
4361 */
4362 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4363 if (!n) {
4364 ret = -ENOMEM;
4365 goto out;
4366 }
4367 init_kmem_cache_node(n);
4368 s->node[nid] = n;
4369 }
4370 /*
4371 * Any cache created after this point will also have kmem_cache_node
4372 * initialized for the new node.
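 * This holds because init_kmem_cache_nodes() walks the slab_nodes mask,
 * and the bit for the new node is set below while slab_mutex is still
 * held.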
4373 */
4374 node_set(nid, slab_nodes);
4375 out:
4376 mutex_unlock(&slab_mutex);
4377 return ret;
4378 }
4379
4380 static int slab_memory_callback(struct notifier_block *self,
4381 unsigned long action, void *arg)
4382 {
4383 int ret = 0;
4384
4385 switch (action) {
4386 case MEM_GOING_ONLINE:
4387 ret = slab_mem_going_online_callback(arg);
4388 break;
4389 case MEM_GOING_OFFLINE:
4390 ret = slab_mem_going_offline_callback(arg);
4391 break;
4392 case MEM_OFFLINE:
4393 case MEM_CANCEL_ONLINE:
4394 slab_mem_offline_callback(arg);
4395 break;
4396 case MEM_ONLINE:
4397 case MEM_CANCEL_OFFLINE:
4398 break;
4399 }
4400 if (ret)
4401 ret = notifier_from_errno(ret);
4402 else
4403 ret = NOTIFY_OK;
4404 return ret;
4405 }
4406
4407 static struct notifier_block slab_memory_callback_nb = {
4408 .notifier_call = slab_memory_callback,
4409 .priority = SLAB_CALLBACK_PRI,
4410 };
4411
4412 /********************************************************************
4413 * Basic setup of slabs
4414 *******************************************************************/
4415
4416 /*
4417 * Used for early kmem_cache structures that were allocated using
4418 * the page allocator. Allocate them properly then fix up the pointers
4419 * that may be pointing to the wrong kmem_cache structure.
4420 */
4421
4422 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4423 {
4424 int node;
4425 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4426 struct kmem_cache_node *n;
4427
4428 memcpy(s, static_cache, kmem_cache->object_size);
4429
4430 /*
4431 * This runs very early, and only the boot processor is supposed to be
4432 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4433 * IPIs around.
4434 */
4435 __flush_cpu_slab(s, smp_processor_id());
4436 for_each_kmem_cache_node(s, node, n) {
4437 struct page *p;
4438
4439 list_for_each_entry(p, &n->partial, slab_list)
4440 p->slab_cache = s;
4441
4442 #ifdef CONFIG_SLUB_DEBUG
4443 list_for_each_entry(p, &n->full, slab_list)
4444 p->slab_cache = s;
4445 #endif
4446 }
4447 list_add(&s->list, &slab_caches);
4448 return s;
4449 }
4450
4451 void __init kmem_cache_init(void)
4452 {
4453 static __initdata struct kmem_cache boot_kmem_cache,
4454 boot_kmem_cache_node;
4455 int node;
4456
4457 if (debug_guardpage_minorder())
4458 slub_max_order = 0;
4459
4460 kmem_cache_node = &boot_kmem_cache_node;
4461 kmem_cache = &boot_kmem_cache;
4462
4463 /*
4464 * Initialize the nodemask for which we will allocate per node
4465 * structures. There is no need to take slab_mutex here yet.
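 * Nothing else can be running this early in boot, so the nodemask
 * cannot change underneath us.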
4466 */ 4467 for_each_node_state(node, N_NORMAL_MEMORY) 4468 node_set(node, slab_nodes); 4469 4470 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4471 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4472 4473 register_hotmemory_notifier(&slab_memory_callback_nb); 4474 4475 /* Able to allocate the per node structures */ 4476 slab_state = PARTIAL; 4477 4478 create_boot_cache(kmem_cache, "kmem_cache", 4479 offsetof(struct kmem_cache, node) + 4480 nr_node_ids * sizeof(struct kmem_cache_node *), 4481 SLAB_HWCACHE_ALIGN, 0, 0); 4482 4483 kmem_cache = bootstrap(&boot_kmem_cache); 4484 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4485 4486 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4487 setup_kmalloc_cache_index_table(); 4488 create_kmalloc_caches(0); 4489 4490 /* Setup random freelists for each cache */ 4491 init_freelist_randomization(); 4492 4493 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4494 slub_cpu_dead); 4495 4496 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4497 cache_line_size(), 4498 slub_min_order, slub_max_order, slub_min_objects, 4499 nr_cpu_ids, nr_node_ids); 4500 } 4501 4502 void __init kmem_cache_init_late(void) 4503 { 4504 } 4505 4506 struct kmem_cache * 4507 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4508 slab_flags_t flags, void (*ctor)(void *)) 4509 { 4510 struct kmem_cache *s; 4511 4512 s = find_mergeable(size, align, flags, name, ctor); 4513 if (s) { 4514 s->refcount++; 4515 4516 /* 4517 * Adjust the object sizes so that we clear 4518 * the complete object on kzalloc. 4519 */ 4520 s->object_size = max(s->object_size, size); 4521 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4522 4523 if (sysfs_slab_alias(s, name)) { 4524 s->refcount--; 4525 s = NULL; 4526 } 4527 } 4528 4529 return s; 4530 } 4531 4532 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4533 { 4534 int err; 4535 4536 err = kmem_cache_open(s, flags); 4537 if (err) 4538 return err; 4539 4540 /* Mutex is not taken during early boot */ 4541 if (slab_state <= UP) 4542 return 0; 4543 4544 err = sysfs_slab_add(s); 4545 if (err) 4546 __kmem_cache_release(s); 4547 4548 return err; 4549 } 4550 4551 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4552 { 4553 struct kmem_cache *s; 4554 void *ret; 4555 4556 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4557 return kmalloc_large(size, gfpflags); 4558 4559 s = kmalloc_slab(size, gfpflags); 4560 4561 if (unlikely(ZERO_OR_NULL_PTR(s))) 4562 return s; 4563 4564 ret = slab_alloc(s, gfpflags, caller, size); 4565 4566 /* Honor the call site pointer we received. */ 4567 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4568 4569 return ret; 4570 } 4571 EXPORT_SYMBOL(__kmalloc_track_caller); 4572 4573 #ifdef CONFIG_NUMA 4574 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4575 int node, unsigned long caller) 4576 { 4577 struct kmem_cache *s; 4578 void *ret; 4579 4580 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4581 ret = kmalloc_large_node(size, gfpflags, node); 4582 4583 trace_kmalloc_node(caller, ret, 4584 size, PAGE_SIZE << get_order(size), 4585 gfpflags, node); 4586 4587 return ret; 4588 } 4589 4590 s = kmalloc_slab(size, gfpflags); 4591 4592 if (unlikely(ZERO_OR_NULL_PTR(s))) 4593 return s; 4594 4595 ret = slab_alloc_node(s, gfpflags, node, caller, size); 4596 4597 /* Honor the call site pointer we received. 
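 * The tracepoint below then attributes the allocation to the original
 * call site instead of this wrapper.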
*/ 4598 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4599 4600 return ret; 4601 } 4602 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4603 #endif 4604 4605 #ifdef CONFIG_SYSFS 4606 static int count_inuse(struct page *page) 4607 { 4608 return page->inuse; 4609 } 4610 4611 static int count_total(struct page *page) 4612 { 4613 return page->objects; 4614 } 4615 #endif 4616 4617 #ifdef CONFIG_SLUB_DEBUG 4618 static void validate_slab(struct kmem_cache *s, struct page *page) 4619 { 4620 void *p; 4621 void *addr = page_address(page); 4622 unsigned long *map; 4623 4624 slab_lock(page); 4625 4626 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) 4627 goto unlock; 4628 4629 /* Now we know that a valid freelist exists */ 4630 map = get_map(s, page); 4631 for_each_object(p, s, addr, page->objects) { 4632 u8 val = test_bit(__obj_to_index(s, addr, p), map) ? 4633 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4634 4635 if (!check_object(s, page, p, val)) 4636 break; 4637 } 4638 put_map(map); 4639 unlock: 4640 slab_unlock(page); 4641 } 4642 4643 static int validate_slab_node(struct kmem_cache *s, 4644 struct kmem_cache_node *n) 4645 { 4646 unsigned long count = 0; 4647 struct page *page; 4648 unsigned long flags; 4649 4650 spin_lock_irqsave(&n->list_lock, flags); 4651 4652 list_for_each_entry(page, &n->partial, slab_list) { 4653 validate_slab(s, page); 4654 count++; 4655 } 4656 if (count != n->nr_partial) 4657 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4658 s->name, count, n->nr_partial); 4659 4660 if (!(s->flags & SLAB_STORE_USER)) 4661 goto out; 4662 4663 list_for_each_entry(page, &n->full, slab_list) { 4664 validate_slab(s, page); 4665 count++; 4666 } 4667 if (count != atomic_long_read(&n->nr_slabs)) 4668 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 4669 s->name, count, atomic_long_read(&n->nr_slabs)); 4670 4671 out: 4672 spin_unlock_irqrestore(&n->list_lock, flags); 4673 return count; 4674 } 4675 4676 static long validate_slab_cache(struct kmem_cache *s) 4677 { 4678 int node; 4679 unsigned long count = 0; 4680 struct kmem_cache_node *n; 4681 4682 flush_all(s); 4683 for_each_kmem_cache_node(s, node, n) 4684 count += validate_slab_node(s, n); 4685 4686 return count; 4687 } 4688 /* 4689 * Generate lists of code addresses where slabcache objects are allocated 4690 * and freed. 
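 * When a cache was created with SLAB_STORE_USER, these lists are exposed
 * through sysfs; a typical (illustrative) way to read them:
 *
 *   # cat /sys/kernel/slab/kmalloc-64/alloc_calls
 *   # cat /sys/kernel/slab/kmalloc-64/free_calls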
4691 */
4692
4693 struct location {
4694 unsigned long count;
4695 unsigned long addr;
4696 long long sum_time;
4697 long min_time;
4698 long max_time;
4699 long min_pid;
4700 long max_pid;
4701 DECLARE_BITMAP(cpus, NR_CPUS);
4702 nodemask_t nodes;
4703 };
4704
4705 struct loc_track {
4706 unsigned long max;
4707 unsigned long count;
4708 struct location *loc;
4709 };
4710
4711 static void free_loc_track(struct loc_track *t)
4712 {
4713 if (t->max)
4714 free_pages((unsigned long)t->loc,
4715 get_order(sizeof(struct location) * t->max));
4716 }
4717
4718 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4719 {
4720 struct location *l;
4721 int order;
4722
4723 order = get_order(sizeof(struct location) * max);
4724
4725 l = (void *)__get_free_pages(flags, order);
4726 if (!l)
4727 return 0;
4728
4729 if (t->count) {
4730 memcpy(l, t->loc, sizeof(struct location) * t->count);
4731 free_loc_track(t);
4732 }
4733 t->max = max;
4734 t->loc = l;
4735 return 1;
4736 }
4737
4738 static int add_location(struct loc_track *t, struct kmem_cache *s,
4739 const struct track *track)
4740 {
4741 long start, end, pos;
4742 struct location *l;
4743 unsigned long caddr;
4744 unsigned long age = jiffies - track->when;
4745
4746 start = -1;
4747 end = t->count;
4748
4749 for ( ; ; ) {
4750 pos = start + (end - start + 1) / 2;
4751
4752 /*
4753 * There is nothing at "end". If we end up there,
4754 * the new element must be inserted before "end".
4755 */
4756 if (pos == end)
4757 break;
4758
4759 caddr = t->loc[pos].addr;
4760 if (track->addr == caddr) {
4761
4762 l = &t->loc[pos];
4763 l->count++;
4764 if (track->when) {
4765 l->sum_time += age;
4766 if (age < l->min_time)
4767 l->min_time = age;
4768 if (age > l->max_time)
4769 l->max_time = age;
4770
4771 if (track->pid < l->min_pid)
4772 l->min_pid = track->pid;
4773 if (track->pid > l->max_pid)
4774 l->max_pid = track->pid;
4775
4776 cpumask_set_cpu(track->cpu,
4777 to_cpumask(l->cpus));
4778 }
4779 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4780 return 1;
4781 }
4782
4783 if (track->addr < caddr)
4784 end = pos;
4785 else
4786 start = pos;
4787 }
4788
4789 /*
4790 * Not found. Insert new tracking element.
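 * The bisection above left "pos" at the first slot whose address is
 * greater than track->addr, so inserting here keeps t->loc sorted by
 * address.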
4791 */ 4792 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 4793 return 0; 4794 4795 l = t->loc + pos; 4796 if (pos < t->count) 4797 memmove(l + 1, l, 4798 (t->count - pos) * sizeof(struct location)); 4799 t->count++; 4800 l->count = 1; 4801 l->addr = track->addr; 4802 l->sum_time = age; 4803 l->min_time = age; 4804 l->max_time = age; 4805 l->min_pid = track->pid; 4806 l->max_pid = track->pid; 4807 cpumask_clear(to_cpumask(l->cpus)); 4808 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 4809 nodes_clear(l->nodes); 4810 node_set(page_to_nid(virt_to_page(track)), l->nodes); 4811 return 1; 4812 } 4813 4814 static void process_slab(struct loc_track *t, struct kmem_cache *s, 4815 struct page *page, enum track_item alloc) 4816 { 4817 void *addr = page_address(page); 4818 void *p; 4819 unsigned long *map; 4820 4821 map = get_map(s, page); 4822 for_each_object(p, s, addr, page->objects) 4823 if (!test_bit(__obj_to_index(s, addr, p), map)) 4824 add_location(t, s, get_track(s, p, alloc)); 4825 put_map(map); 4826 } 4827 4828 static int list_locations(struct kmem_cache *s, char *buf, 4829 enum track_item alloc) 4830 { 4831 int len = 0; 4832 unsigned long i; 4833 struct loc_track t = { 0, 0, NULL }; 4834 int node; 4835 struct kmem_cache_node *n; 4836 4837 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4838 GFP_KERNEL)) { 4839 return sysfs_emit(buf, "Out of memory\n"); 4840 } 4841 /* Push back cpu slabs */ 4842 flush_all(s); 4843 4844 for_each_kmem_cache_node(s, node, n) { 4845 unsigned long flags; 4846 struct page *page; 4847 4848 if (!atomic_long_read(&n->nr_slabs)) 4849 continue; 4850 4851 spin_lock_irqsave(&n->list_lock, flags); 4852 list_for_each_entry(page, &n->partial, slab_list) 4853 process_slab(&t, s, page, alloc); 4854 list_for_each_entry(page, &n->full, slab_list) 4855 process_slab(&t, s, page, alloc); 4856 spin_unlock_irqrestore(&n->list_lock, flags); 4857 } 4858 4859 for (i = 0; i < t.count; i++) { 4860 struct location *l = &t.loc[i]; 4861 4862 len += sysfs_emit_at(buf, len, "%7ld ", l->count); 4863 4864 if (l->addr) 4865 len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr); 4866 else 4867 len += sysfs_emit_at(buf, len, "<not-available>"); 4868 4869 if (l->sum_time != l->min_time) 4870 len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld", 4871 l->min_time, 4872 (long)div_u64(l->sum_time, 4873 l->count), 4874 l->max_time); 4875 else 4876 len += sysfs_emit_at(buf, len, " age=%ld", l->min_time); 4877 4878 if (l->min_pid != l->max_pid) 4879 len += sysfs_emit_at(buf, len, " pid=%ld-%ld", 4880 l->min_pid, l->max_pid); 4881 else 4882 len += sysfs_emit_at(buf, len, " pid=%ld", 4883 l->min_pid); 4884 4885 if (num_online_cpus() > 1 && 4886 !cpumask_empty(to_cpumask(l->cpus))) 4887 len += sysfs_emit_at(buf, len, " cpus=%*pbl", 4888 cpumask_pr_args(to_cpumask(l->cpus))); 4889 4890 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 4891 len += sysfs_emit_at(buf, len, " nodes=%*pbl", 4892 nodemask_pr_args(&l->nodes)); 4893 4894 len += sysfs_emit_at(buf, len, "\n"); 4895 } 4896 4897 free_loc_track(&t); 4898 if (!t.count) 4899 len += sysfs_emit_at(buf, len, "No data\n"); 4900 4901 return len; 4902 } 4903 #endif /* CONFIG_SLUB_DEBUG */ 4904 4905 #ifdef SLUB_RESILIENCY_TEST 4906 static void __init resiliency_test(void) 4907 { 4908 u8 *p; 4909 int type = KMALLOC_NORMAL; 4910 4911 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10); 4912 4913 pr_err("SLUB resiliency testing\n"); 4914 pr_err("-----------------------\n"); 4915 pr_err("A. 
Corruption after allocation\n");
4916
4917 p = kzalloc(16, GFP_KERNEL);
4918 p[16] = 0x12;
4919 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4920 p + 16);
4921
4922 validate_slab_cache(kmalloc_caches[type][4]);
4923
4924 /* Hmmm... The next two are dangerous */
4925 p = kzalloc(32, GFP_KERNEL);
4926 p[32 + sizeof(void *)] = 0x34;
4927 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> 0x%p\n",
4928 p);
4929 pr_err("If allocated object is overwritten then not detectable\n\n");
4930
4931 validate_slab_cache(kmalloc_caches[type][5]);
4932 p = kzalloc(64, GFP_KERNEL);
4933 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4934 *p = 0x56;
4935 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4936 p);
4937 pr_err("If allocated object is overwritten then not detectable\n\n");
4938 validate_slab_cache(kmalloc_caches[type][6]);
4939
4940 pr_err("\nB. Corruption after free\n");
4941 p = kzalloc(128, GFP_KERNEL);
4942 kfree(p);
4943 *p = 0x78;
4944 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4945 validate_slab_cache(kmalloc_caches[type][7]);
4946
4947 p = kzalloc(256, GFP_KERNEL);
4948 kfree(p);
4949 p[50] = 0x9a;
4950 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4951 validate_slab_cache(kmalloc_caches[type][8]);
4952
4953 p = kzalloc(512, GFP_KERNEL);
4954 kfree(p);
4955 p[512] = 0xab;
4956 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4957 validate_slab_cache(kmalloc_caches[type][9]);
4958 }
4959 #else
4960 #ifdef CONFIG_SYSFS
4961 static void resiliency_test(void) {}
4962 #endif
4963 #endif /* SLUB_RESILIENCY_TEST */
4964
4965 #ifdef CONFIG_SYSFS
4966 enum slab_stat_type {
4967 SL_ALL, /* All slabs */
4968 SL_PARTIAL, /* Only partially allocated slabs */
4969 SL_CPU, /* Only slabs used for cpu caches */
4970 SL_OBJECTS, /* Determine allocated objects not slabs */
4971 SL_TOTAL /* Determine object capacity not slabs */
4972 };
4973
4974 #define SO_ALL (1 << SL_ALL)
4975 #define SO_PARTIAL (1 << SL_PARTIAL)
4976 #define SO_CPU (1 << SL_CPU)
4977 #define SO_OBJECTS (1 << SL_OBJECTS)
4978 #define SO_TOTAL (1 << SL_TOTAL)
4979
4980 static ssize_t show_slab_objects(struct kmem_cache *s,
4981 char *buf, unsigned long flags)
4982 {
4983 unsigned long total = 0;
4984 int node;
4985 int x;
4986 unsigned long *nodes;
4987 int len = 0;
4988
4989 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4990 if (!nodes)
4991 return -ENOMEM;
4992
4993 if (flags & SO_CPU) {
4994 int cpu;
4995
4996 for_each_possible_cpu(cpu) {
4997 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4998 cpu);
4999 int node;
5000 struct page *page;
5001
5002 page = READ_ONCE(c->page);
5003 if (!page)
5004 continue;
5005
5006 node = page_to_nid(page);
5007 if (flags & SO_TOTAL)
5008 x = page->objects;
5009 else if (flags & SO_OBJECTS)
5010 x = page->inuse;
5011 else
5012 x = 1;
5013
5014 total += x;
5015 nodes[node] += x;
5016
5017 page = slub_percpu_partial_read_once(c);
5018 if (page) {
5019 node = page_to_nid(page);
5020 if (flags & SO_TOTAL)
5021 WARN_ON_ONCE(1);
5022 else if (flags & SO_OBJECTS)
5023 WARN_ON_ONCE(1);
5024 else
5025 x = page->pages;
5026 total += x;
5027 nodes[node] += x;
5028 }
5029 }
5030 }
5031
5032 /*
5033 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
5034 * already held which will conflict with an existing lock order:
5035 *
5036 * mem_hotplug_lock->slab_mutex->kernfs_mutex
5037 *
5038 * We don't really need mem_hotplug_lock (to hold off
5039 *
slab_mem_going_offline_callback) here because slab's memory hot 5040 * unplug code doesn't destroy the kmem_cache->node[] data. 5041 */ 5042 5043 #ifdef CONFIG_SLUB_DEBUG 5044 if (flags & SO_ALL) { 5045 struct kmem_cache_node *n; 5046 5047 for_each_kmem_cache_node(s, node, n) { 5048 5049 if (flags & SO_TOTAL) 5050 x = atomic_long_read(&n->total_objects); 5051 else if (flags & SO_OBJECTS) 5052 x = atomic_long_read(&n->total_objects) - 5053 count_partial(n, count_free); 5054 else 5055 x = atomic_long_read(&n->nr_slabs); 5056 total += x; 5057 nodes[node] += x; 5058 } 5059 5060 } else 5061 #endif 5062 if (flags & SO_PARTIAL) { 5063 struct kmem_cache_node *n; 5064 5065 for_each_kmem_cache_node(s, node, n) { 5066 if (flags & SO_TOTAL) 5067 x = count_partial(n, count_total); 5068 else if (flags & SO_OBJECTS) 5069 x = count_partial(n, count_inuse); 5070 else 5071 x = n->nr_partial; 5072 total += x; 5073 nodes[node] += x; 5074 } 5075 } 5076 5077 len += sysfs_emit_at(buf, len, "%lu", total); 5078 #ifdef CONFIG_NUMA 5079 for (node = 0; node < nr_node_ids; node++) { 5080 if (nodes[node]) 5081 len += sysfs_emit_at(buf, len, " N%d=%lu", 5082 node, nodes[node]); 5083 } 5084 #endif 5085 len += sysfs_emit_at(buf, len, "\n"); 5086 kfree(nodes); 5087 5088 return len; 5089 } 5090 5091 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5092 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5093 5094 struct slab_attribute { 5095 struct attribute attr; 5096 ssize_t (*show)(struct kmem_cache *s, char *buf); 5097 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5098 }; 5099 5100 #define SLAB_ATTR_RO(_name) \ 5101 static struct slab_attribute _name##_attr = \ 5102 __ATTR(_name, 0400, _name##_show, NULL) 5103 5104 #define SLAB_ATTR(_name) \ 5105 static struct slab_attribute _name##_attr = \ 5106 __ATTR(_name, 0600, _name##_show, _name##_store) 5107 5108 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5109 { 5110 return sysfs_emit(buf, "%u\n", s->size); 5111 } 5112 SLAB_ATTR_RO(slab_size); 5113 5114 static ssize_t align_show(struct kmem_cache *s, char *buf) 5115 { 5116 return sysfs_emit(buf, "%u\n", s->align); 5117 } 5118 SLAB_ATTR_RO(align); 5119 5120 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5121 { 5122 return sysfs_emit(buf, "%u\n", s->object_size); 5123 } 5124 SLAB_ATTR_RO(object_size); 5125 5126 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5127 { 5128 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5129 } 5130 SLAB_ATTR_RO(objs_per_slab); 5131 5132 static ssize_t order_show(struct kmem_cache *s, char *buf) 5133 { 5134 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5135 } 5136 SLAB_ATTR_RO(order); 5137 5138 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5139 { 5140 return sysfs_emit(buf, "%lu\n", s->min_partial); 5141 } 5142 5143 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5144 size_t length) 5145 { 5146 unsigned long min; 5147 int err; 5148 5149 err = kstrtoul(buf, 10, &min); 5150 if (err) 5151 return err; 5152 5153 set_min_partial(s, min); 5154 return length; 5155 } 5156 SLAB_ATTR(min_partial); 5157 5158 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5159 { 5160 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s)); 5161 } 5162 5163 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5164 size_t length) 5165 { 5166 unsigned int objects; 5167 int err; 5168 5169 err = kstrtouint(buf, 10, &objects); 5170 if (err) 5171 
return err; 5172 if (objects && !kmem_cache_has_cpu_partial(s)) 5173 return -EINVAL; 5174 5175 slub_set_cpu_partial(s, objects); 5176 flush_all(s); 5177 return length; 5178 } 5179 SLAB_ATTR(cpu_partial); 5180 5181 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5182 { 5183 if (!s->ctor) 5184 return 0; 5185 return sysfs_emit(buf, "%pS\n", s->ctor); 5186 } 5187 SLAB_ATTR_RO(ctor); 5188 5189 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5190 { 5191 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5192 } 5193 SLAB_ATTR_RO(aliases); 5194 5195 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5196 { 5197 return show_slab_objects(s, buf, SO_PARTIAL); 5198 } 5199 SLAB_ATTR_RO(partial); 5200 5201 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5202 { 5203 return show_slab_objects(s, buf, SO_CPU); 5204 } 5205 SLAB_ATTR_RO(cpu_slabs); 5206 5207 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5208 { 5209 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5210 } 5211 SLAB_ATTR_RO(objects); 5212 5213 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5214 { 5215 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5216 } 5217 SLAB_ATTR_RO(objects_partial); 5218 5219 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5220 { 5221 int objects = 0; 5222 int pages = 0; 5223 int cpu; 5224 int len = 0; 5225 5226 for_each_online_cpu(cpu) { 5227 struct page *page; 5228 5229 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5230 5231 if (page) { 5232 pages += page->pages; 5233 objects += page->pobjects; 5234 } 5235 } 5236 5237 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages); 5238 5239 #ifdef CONFIG_SMP 5240 for_each_online_cpu(cpu) { 5241 struct page *page; 5242 5243 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5244 if (page) 5245 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5246 cpu, page->pobjects, page->pages); 5247 } 5248 #endif 5249 len += sysfs_emit_at(buf, len, "\n"); 5250 5251 return len; 5252 } 5253 SLAB_ATTR_RO(slabs_cpu_partial); 5254 5255 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5256 { 5257 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5258 } 5259 SLAB_ATTR_RO(reclaim_account); 5260 5261 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5262 { 5263 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5264 } 5265 SLAB_ATTR_RO(hwcache_align); 5266 5267 #ifdef CONFIG_ZONE_DMA 5268 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5269 { 5270 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5271 } 5272 SLAB_ATTR_RO(cache_dma); 5273 #endif 5274 5275 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5276 { 5277 return sysfs_emit(buf, "%u\n", s->usersize); 5278 } 5279 SLAB_ATTR_RO(usersize); 5280 5281 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5282 { 5283 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5284 } 5285 SLAB_ATTR_RO(destroy_by_rcu); 5286 5287 #ifdef CONFIG_SLUB_DEBUG 5288 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5289 { 5290 return show_slab_objects(s, buf, SO_ALL); 5291 } 5292 SLAB_ATTR_RO(slabs); 5293 5294 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5295 { 5296 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5297 } 5298 SLAB_ATTR_RO(total_objects); 5299 5300 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5301 
{ 5302 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5303 } 5304 SLAB_ATTR_RO(sanity_checks); 5305 5306 static ssize_t trace_show(struct kmem_cache *s, char *buf) 5307 { 5308 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5309 } 5310 SLAB_ATTR_RO(trace); 5311 5312 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5313 { 5314 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5315 } 5316 5317 SLAB_ATTR_RO(red_zone); 5318 5319 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5320 { 5321 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5322 } 5323 5324 SLAB_ATTR_RO(poison); 5325 5326 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5327 { 5328 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5329 } 5330 5331 SLAB_ATTR_RO(store_user); 5332 5333 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5334 { 5335 return 0; 5336 } 5337 5338 static ssize_t validate_store(struct kmem_cache *s, 5339 const char *buf, size_t length) 5340 { 5341 int ret = -EINVAL; 5342 5343 if (buf[0] == '1') { 5344 ret = validate_slab_cache(s); 5345 if (ret >= 0) 5346 ret = length; 5347 } 5348 return ret; 5349 } 5350 SLAB_ATTR(validate); 5351 5352 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) 5353 { 5354 if (!(s->flags & SLAB_STORE_USER)) 5355 return -ENOSYS; 5356 return list_locations(s, buf, TRACK_ALLOC); 5357 } 5358 SLAB_ATTR_RO(alloc_calls); 5359 5360 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) 5361 { 5362 if (!(s->flags & SLAB_STORE_USER)) 5363 return -ENOSYS; 5364 return list_locations(s, buf, TRACK_FREE); 5365 } 5366 SLAB_ATTR_RO(free_calls); 5367 #endif /* CONFIG_SLUB_DEBUG */ 5368 5369 #ifdef CONFIG_FAILSLAB 5370 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5371 { 5372 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5373 } 5374 SLAB_ATTR_RO(failslab); 5375 #endif 5376 5377 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5378 { 5379 return 0; 5380 } 5381 5382 static ssize_t shrink_store(struct kmem_cache *s, 5383 const char *buf, size_t length) 5384 { 5385 if (buf[0] == '1') 5386 kmem_cache_shrink(s); 5387 else 5388 return -EINVAL; 5389 return length; 5390 } 5391 SLAB_ATTR(shrink); 5392 5393 #ifdef CONFIG_NUMA 5394 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5395 { 5396 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5397 } 5398 5399 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5400 const char *buf, size_t length) 5401 { 5402 unsigned int ratio; 5403 int err; 5404 5405 err = kstrtouint(buf, 10, &ratio); 5406 if (err) 5407 return err; 5408 if (ratio > 100) 5409 return -ERANGE; 5410 5411 s->remote_node_defrag_ratio = ratio * 10; 5412 5413 return length; 5414 } 5415 SLAB_ATTR(remote_node_defrag_ratio); 5416 #endif 5417 5418 #ifdef CONFIG_SLUB_STATS 5419 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5420 { 5421 unsigned long sum = 0; 5422 int cpu; 5423 int len = 0; 5424 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5425 5426 if (!data) 5427 return -ENOMEM; 5428 5429 for_each_online_cpu(cpu) { 5430 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5431 5432 data[cpu] = x; 5433 sum += x; 5434 } 5435 5436 len += sysfs_emit_at(buf, len, "%lu", sum); 5437 5438 #ifdef CONFIG_SMP 5439 for_each_online_cpu(cpu) { 5440 if (data[cpu]) 5441 len += sysfs_emit_at(buf, len, " C%d=%u", 5442 cpu, data[cpu]); 
5443 } 5444 #endif 5445 kfree(data); 5446 len += sysfs_emit_at(buf, len, "\n"); 5447 5448 return len; 5449 } 5450 5451 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5452 { 5453 int cpu; 5454 5455 for_each_online_cpu(cpu) 5456 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5457 } 5458 5459 #define STAT_ATTR(si, text) \ 5460 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5461 { \ 5462 return show_stat(s, buf, si); \ 5463 } \ 5464 static ssize_t text##_store(struct kmem_cache *s, \ 5465 const char *buf, size_t length) \ 5466 { \ 5467 if (buf[0] != '0') \ 5468 return -EINVAL; \ 5469 clear_stat(s, si); \ 5470 return length; \ 5471 } \ 5472 SLAB_ATTR(text); \ 5473 5474 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5475 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5476 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5477 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5478 STAT_ATTR(FREE_FROZEN, free_frozen); 5479 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5480 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5481 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5482 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5483 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5484 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5485 STAT_ATTR(FREE_SLAB, free_slab); 5486 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5487 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5488 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5489 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5490 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5491 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5492 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5493 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5494 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5495 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5496 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5497 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5498 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5499 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5500 #endif /* CONFIG_SLUB_STATS */ 5501 5502 static struct attribute *slab_attrs[] = { 5503 &slab_size_attr.attr, 5504 &object_size_attr.attr, 5505 &objs_per_slab_attr.attr, 5506 &order_attr.attr, 5507 &min_partial_attr.attr, 5508 &cpu_partial_attr.attr, 5509 &objects_attr.attr, 5510 &objects_partial_attr.attr, 5511 &partial_attr.attr, 5512 &cpu_slabs_attr.attr, 5513 &ctor_attr.attr, 5514 &aliases_attr.attr, 5515 &align_attr.attr, 5516 &hwcache_align_attr.attr, 5517 &reclaim_account_attr.attr, 5518 &destroy_by_rcu_attr.attr, 5519 &shrink_attr.attr, 5520 &slabs_cpu_partial_attr.attr, 5521 #ifdef CONFIG_SLUB_DEBUG 5522 &total_objects_attr.attr, 5523 &slabs_attr.attr, 5524 &sanity_checks_attr.attr, 5525 &trace_attr.attr, 5526 &red_zone_attr.attr, 5527 &poison_attr.attr, 5528 &store_user_attr.attr, 5529 &validate_attr.attr, 5530 &alloc_calls_attr.attr, 5531 &free_calls_attr.attr, 5532 #endif 5533 #ifdef CONFIG_ZONE_DMA 5534 &cache_dma_attr.attr, 5535 #endif 5536 #ifdef CONFIG_NUMA 5537 &remote_node_defrag_ratio_attr.attr, 5538 #endif 5539 #ifdef CONFIG_SLUB_STATS 5540 &alloc_fastpath_attr.attr, 5541 &alloc_slowpath_attr.attr, 5542 &free_fastpath_attr.attr, 5543 &free_slowpath_attr.attr, 5544 &free_frozen_attr.attr, 5545 &free_add_partial_attr.attr, 5546 &free_remove_partial_attr.attr, 5547 &alloc_from_partial_attr.attr, 5548 &alloc_slab_attr.attr, 5549 &alloc_refill_attr.attr, 5550 &alloc_node_mismatch_attr.attr, 5551 &free_slab_attr.attr, 5552 &cpuslab_flush_attr.attr, 5553 
&deactivate_full_attr.attr, 5554 &deactivate_empty_attr.attr, 5555 &deactivate_to_head_attr.attr, 5556 &deactivate_to_tail_attr.attr, 5557 &deactivate_remote_frees_attr.attr, 5558 &deactivate_bypass_attr.attr, 5559 &order_fallback_attr.attr, 5560 &cmpxchg_double_fail_attr.attr, 5561 &cmpxchg_double_cpu_fail_attr.attr, 5562 &cpu_partial_alloc_attr.attr, 5563 &cpu_partial_free_attr.attr, 5564 &cpu_partial_node_attr.attr, 5565 &cpu_partial_drain_attr.attr, 5566 #endif 5567 #ifdef CONFIG_FAILSLAB 5568 &failslab_attr.attr, 5569 #endif 5570 &usersize_attr.attr, 5571 5572 NULL 5573 }; 5574 5575 static const struct attribute_group slab_attr_group = { 5576 .attrs = slab_attrs, 5577 }; 5578 5579 static ssize_t slab_attr_show(struct kobject *kobj, 5580 struct attribute *attr, 5581 char *buf) 5582 { 5583 struct slab_attribute *attribute; 5584 struct kmem_cache *s; 5585 int err; 5586 5587 attribute = to_slab_attr(attr); 5588 s = to_slab(kobj); 5589 5590 if (!attribute->show) 5591 return -EIO; 5592 5593 err = attribute->show(s, buf); 5594 5595 return err; 5596 } 5597 5598 static ssize_t slab_attr_store(struct kobject *kobj, 5599 struct attribute *attr, 5600 const char *buf, size_t len) 5601 { 5602 struct slab_attribute *attribute; 5603 struct kmem_cache *s; 5604 int err; 5605 5606 attribute = to_slab_attr(attr); 5607 s = to_slab(kobj); 5608 5609 if (!attribute->store) 5610 return -EIO; 5611 5612 err = attribute->store(s, buf, len); 5613 return err; 5614 } 5615 5616 static void kmem_cache_release(struct kobject *k) 5617 { 5618 slab_kmem_cache_release(to_slab(k)); 5619 } 5620 5621 static const struct sysfs_ops slab_sysfs_ops = { 5622 .show = slab_attr_show, 5623 .store = slab_attr_store, 5624 }; 5625 5626 static struct kobj_type slab_ktype = { 5627 .sysfs_ops = &slab_sysfs_ops, 5628 .release = kmem_cache_release, 5629 }; 5630 5631 static struct kset *slab_kset; 5632 5633 static inline struct kset *cache_kset(struct kmem_cache *s) 5634 { 5635 return slab_kset; 5636 } 5637 5638 #define ID_STR_LENGTH 64 5639 5640 /* Create a unique string id for a slab cache: 5641 * 5642 * Format :[flags-]size 5643 */ 5644 static char *create_unique_id(struct kmem_cache *s) 5645 { 5646 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5647 char *p = name; 5648 5649 BUG_ON(!name); 5650 5651 *p++ = ':'; 5652 /* 5653 * First flags affecting slabcache operations. We will only 5654 * get here for aliasable slabs so we do not need to support 5655 * too many flags. The flags here must cover all flags that 5656 * are matched during merging to guarantee that the id is 5657 * unique. 5658 */ 5659 if (s->flags & SLAB_CACHE_DMA) 5660 *p++ = 'd'; 5661 if (s->flags & SLAB_CACHE_DMA32) 5662 *p++ = 'D'; 5663 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5664 *p++ = 'a'; 5665 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5666 *p++ = 'F'; 5667 if (s->flags & SLAB_ACCOUNT) 5668 *p++ = 'A'; 5669 if (p != name + 1) 5670 *p++ = '-'; 5671 p += sprintf(p, "%07u", s->size); 5672 5673 BUG_ON(p > name + ID_STR_LENGTH - 1); 5674 return name; 5675 } 5676 5677 static int sysfs_slab_add(struct kmem_cache *s) 5678 { 5679 int err; 5680 const char *name; 5681 struct kset *kset = cache_kset(s); 5682 int unmergeable = slab_unmergeable(s); 5683 5684 if (!kset) { 5685 kobject_init(&s->kobj, &slab_ktype); 5686 return 0; 5687 } 5688 5689 if (!unmergeable && disable_higher_order_debug && 5690 (slub_debug & DEBUG_METADATA_FLAGS)) 5691 unmergeable = 1; 5692 5693 if (unmergeable) { 5694 /* 5695 * Slabcache can never be merged so we can use the name proper. 
5696 * This is typically the case for debug situations. In that 5697 * case we can catch duplicate names easily. 5698 */ 5699 sysfs_remove_link(&slab_kset->kobj, s->name); 5700 name = s->name; 5701 } else { 5702 /* 5703 * Create a unique name for the slab as a target 5704 * for the symlinks. 5705 */ 5706 name = create_unique_id(s); 5707 } 5708 5709 s->kobj.kset = kset; 5710 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5711 if (err) 5712 goto out; 5713 5714 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5715 if (err) 5716 goto out_del_kobj; 5717 5718 if (!unmergeable) { 5719 /* Setup first alias */ 5720 sysfs_slab_alias(s, s->name); 5721 } 5722 out: 5723 if (!unmergeable) 5724 kfree(name); 5725 return err; 5726 out_del_kobj: 5727 kobject_del(&s->kobj); 5728 goto out; 5729 } 5730 5731 void sysfs_slab_unlink(struct kmem_cache *s) 5732 { 5733 if (slab_state >= FULL) 5734 kobject_del(&s->kobj); 5735 } 5736 5737 void sysfs_slab_release(struct kmem_cache *s) 5738 { 5739 if (slab_state >= FULL) 5740 kobject_put(&s->kobj); 5741 } 5742 5743 /* 5744 * Need to buffer aliases during bootup until sysfs becomes 5745 * available lest we lose that information. 5746 */ 5747 struct saved_alias { 5748 struct kmem_cache *s; 5749 const char *name; 5750 struct saved_alias *next; 5751 }; 5752 5753 static struct saved_alias *alias_list; 5754 5755 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5756 { 5757 struct saved_alias *al; 5758 5759 if (slab_state == FULL) { 5760 /* 5761 * If we have a leftover link then remove it. 5762 */ 5763 sysfs_remove_link(&slab_kset->kobj, name); 5764 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5765 } 5766 5767 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5768 if (!al) 5769 return -ENOMEM; 5770 5771 al->s = s; 5772 al->name = name; 5773 al->next = alias_list; 5774 alias_list = al; 5775 return 0; 5776 } 5777 5778 static int __init slab_sysfs_init(void) 5779 { 5780 struct kmem_cache *s; 5781 int err; 5782 5783 mutex_lock(&slab_mutex); 5784 5785 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5786 if (!slab_kset) { 5787 mutex_unlock(&slab_mutex); 5788 pr_err("Cannot register slab subsystem.\n"); 5789 return -ENOSYS; 5790 } 5791 5792 slab_state = FULL; 5793 5794 list_for_each_entry(s, &slab_caches, list) { 5795 err = sysfs_slab_add(s); 5796 if (err) 5797 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 5798 s->name); 5799 } 5800 5801 while (alias_list) { 5802 struct saved_alias *al = alias_list; 5803 5804 alias_list = alias_list->next; 5805 err = sysfs_slab_alias(al->s, al->name); 5806 if (err) 5807 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 5808 al->name); 5809 kfree(al); 5810 } 5811 5812 mutex_unlock(&slab_mutex); 5813 resiliency_test(); 5814 return 0; 5815 } 5816 5817 __initcall(slab_sysfs_init); 5818 #endif /* CONFIG_SYSFS */ 5819 5820 /* 5821 * The /proc/slabinfo ABI 5822 */ 5823 #ifdef CONFIG_SLUB_DEBUG 5824 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 5825 { 5826 unsigned long nr_slabs = 0; 5827 unsigned long nr_objs = 0; 5828 unsigned long nr_free = 0; 5829 int node; 5830 struct kmem_cache_node *n; 5831 5832 for_each_kmem_cache_node(s, node, n) { 5833 nr_slabs += node_nr_slabs(n); 5834 nr_objs += node_nr_objs(n); 5835 nr_free += count_partial(n, count_free); 5836 } 5837 5838 sinfo->active_objs = nr_objs - nr_free; 5839 sinfo->num_objs = nr_objs; 5840 sinfo->active_slabs = nr_slabs; 5841 sinfo->num_slabs = nr_slabs; 5842 sinfo->objects_per_slab = 
oo_objects(s->oo); 5843 sinfo->cache_order = oo_order(s->oo); 5844 } 5845 5846 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 5847 { 5848 } 5849 5850 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 5851 size_t count, loff_t *ppos) 5852 { 5853 return -EIO; 5854 } 5855 #endif /* CONFIG_SLUB_DEBUG */ 5856