// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(page) (Only on some arches or for debugging)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->inuse		-> Number of objects in use
 *	C. page->objects	-> Number of objects in page
 *	D. page->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list except the per cpu partial list. The processor that froze
 *   the slab is the one who can perform list operations on the page. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   page's freelist.
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters.
This is a percpu structure manipulated only by 99 * the local cpu, so the lock protects against being preempted or interrupted 100 * by an irq. Fast path operations rely on lockless operations instead. 101 * On PREEMPT_RT, the local lock does not actually disable irqs (and thus 102 * prevent the lockless operations), so fastpath operations also need to take 103 * the lock and are no longer lockless. 104 * 105 * lockless fastpaths 106 * 107 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free()) 108 * are fully lockless when satisfied from the percpu slab (and when 109 * cmpxchg_double is possible to use, otherwise slab_lock is taken). 110 * They also don't disable preemption or migration or irqs. They rely on 111 * the transaction id (tid) field to detect being preempted or moved to 112 * another cpu. 113 * 114 * irq, preemption, migration considerations 115 * 116 * Interrupts are disabled as part of list_lock or local_lock operations, or 117 * around the slab_lock operation, in order to make the slab allocator safe 118 * to use in the context of an irq. 119 * 120 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the 121 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the 122 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer 123 * doesn't have to be revalidated in each section protected by the local lock. 124 * 125 * SLUB assigns one slab for allocation to each processor. 126 * Allocations only occur from these slabs called cpu slabs. 127 * 128 * Slabs with free elements are kept on a partial list and during regular 129 * operations no list for full slabs is used. If an object in a full slab is 130 * freed then the slab will show up again on the partial lists. 131 * We track full slabs for debugging purposes though because otherwise we 132 * cannot scan all objects. 133 * 134 * Slabs are freed when they become empty. Teardown and setup is 135 * minimal so we rely on the page allocators per cpu caches for 136 * fast frees and allocs. 137 * 138 * page->frozen The slab is frozen and exempt from list processing. 139 * This means that the slab is dedicated to a purpose 140 * such as satisfying allocations for a specific 141 * processor. Objects may be freed in the slab while 142 * it is frozen but slab_free will then skip the usual 143 * list operations. It is up to the processor holding 144 * the slab to integrate the slab into the slab lists 145 * when the slab is no longer needed. 146 * 147 * One use of this flag is to mark slabs that are 148 * used for allocations. Then such a slab becomes a cpu 149 * slab. The cpu slab may be equipped with an additional 150 * freelist that allows lockless access to 151 * free objects in addition to the regular freelist 152 * that requires the slab lock. 153 * 154 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug 155 * options set. This moves slab handling out of 156 * the fast path and disables lockless freelists. 157 */ 158 159 /* 160 * We could simply use migrate_disable()/enable() but as long as it's a 161 * function call even on !PREEMPT_RT, use inline preempt_disable() there. 
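 *
 * A typical slowpath caller brackets a section that must stay on one cpu
 * with the wrappers defined below, roughly:
 *
 *	c = slub_get_cpu_ptr(s->cpu_slab);
 *	... work that must not migrate to another cpu ...
 *	slub_put_cpu_ptr(s->cpu_slab);
 *
 * (illustrative sketch only; the real call sites are the allocation
 * slowpath and bulk allocation mentioned above)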
162 */ 163 #ifndef CONFIG_PREEMPT_RT 164 #define slub_get_cpu_ptr(var) get_cpu_ptr(var) 165 #define slub_put_cpu_ptr(var) put_cpu_ptr(var) 166 #else 167 #define slub_get_cpu_ptr(var) \ 168 ({ \ 169 migrate_disable(); \ 170 this_cpu_ptr(var); \ 171 }) 172 #define slub_put_cpu_ptr(var) \ 173 do { \ 174 (void)(var); \ 175 migrate_enable(); \ 176 } while (0) 177 #endif 178 179 #ifdef CONFIG_SLUB_DEBUG 180 #ifdef CONFIG_SLUB_DEBUG_ON 181 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled); 182 #else 183 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled); 184 #endif 185 #endif /* CONFIG_SLUB_DEBUG */ 186 187 static inline bool kmem_cache_debug(struct kmem_cache *s) 188 { 189 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); 190 } 191 192 void *fixup_red_left(struct kmem_cache *s, void *p) 193 { 194 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) 195 p += s->red_left_pad; 196 197 return p; 198 } 199 200 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) 201 { 202 #ifdef CONFIG_SLUB_CPU_PARTIAL 203 return !kmem_cache_debug(s); 204 #else 205 return false; 206 #endif 207 } 208 209 /* 210 * Issues still to be resolved: 211 * 212 * - Support PAGE_ALLOC_DEBUG. Should be easy to do. 213 * 214 * - Variable sizing of the per node arrays 215 */ 216 217 /* Enable to log cmpxchg failures */ 218 #undef SLUB_DEBUG_CMPXCHG 219 220 /* 221 * Minimum number of partial slabs. These will be left on the partial 222 * lists even if they are empty. kmem_cache_shrink may reclaim them. 223 */ 224 #define MIN_PARTIAL 5 225 226 /* 227 * Maximum number of desirable partial slabs. 228 * The existence of more partial slabs makes kmem_cache_shrink 229 * sort the partial list by the number of objects in use. 230 */ 231 #define MAX_PARTIAL 10 232 233 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \ 234 SLAB_POISON | SLAB_STORE_USER) 235 236 /* 237 * These debug flags cannot use CMPXCHG because there might be consistency 238 * issues when checking or reading debug information 239 */ 240 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \ 241 SLAB_TRACE) 242 243 244 /* 245 * Debugging flags that require metadata to be stored in the slab. These get 246 * disabled when slub_debug=O is used and a cache's min order increases with 247 * metadata. 248 */ 249 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) 250 251 #define OO_SHIFT 16 252 #define OO_MASK ((1 << OO_SHIFT) - 1) 253 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */ 254 255 /* Internal SLUB flags */ 256 /* Poison object */ 257 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) 258 /* Use cmpxchg_double */ 259 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) 260 261 /* 262 * Tracking user of a slab. 
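 *
 * When SLAB_STORE_USER is set, two of these records are kept per object:
 * one for the most recent allocation (TRACK_ALLOC) and one for the most
 * recent free (TRACK_FREE); see get_track() and set_track() below.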
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}

/* Returns the freelist pointer recorded at location ptr_addr.
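 *
 * With CONFIG_SLAB_FREELIST_HARDENED the value stored at ptr_addr is not
 * the plain pointer but, roughly:
 *
 *	stored = fp ^ s->random ^ swab(ptr_addr)
 *
 * so applying the same transform again (as freelist_ptr() above does)
 * recovers fp.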
*/ 342 static inline void *freelist_dereference(const struct kmem_cache *s, 343 void *ptr_addr) 344 { 345 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), 346 (unsigned long)ptr_addr); 347 } 348 349 static inline void *get_freepointer(struct kmem_cache *s, void *object) 350 { 351 object = kasan_reset_tag(object); 352 return freelist_dereference(s, object + s->offset); 353 } 354 355 static void prefetch_freepointer(const struct kmem_cache *s, void *object) 356 { 357 prefetch(object + s->offset); 358 } 359 360 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) 361 { 362 unsigned long freepointer_addr; 363 void *p; 364 365 if (!debug_pagealloc_enabled_static()) 366 return get_freepointer(s, object); 367 368 object = kasan_reset_tag(object); 369 freepointer_addr = (unsigned long)object + s->offset; 370 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p)); 371 return freelist_ptr(s, p, freepointer_addr); 372 } 373 374 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) 375 { 376 unsigned long freeptr_addr = (unsigned long)object + s->offset; 377 378 #ifdef CONFIG_SLAB_FREELIST_HARDENED 379 BUG_ON(object == fp); /* naive detection of double free or corruption */ 380 #endif 381 382 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr); 383 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); 384 } 385 386 /* Loop over all objects in a slab */ 387 #define for_each_object(__p, __s, __addr, __objects) \ 388 for (__p = fixup_red_left(__s, __addr); \ 389 __p < (__addr) + (__objects) * (__s)->size; \ 390 __p += (__s)->size) 391 392 static inline unsigned int order_objects(unsigned int order, unsigned int size) 393 { 394 return ((unsigned int)PAGE_SIZE << order) / size; 395 } 396 397 static inline struct kmem_cache_order_objects oo_make(unsigned int order, 398 unsigned int size) 399 { 400 struct kmem_cache_order_objects x = { 401 (order << OO_SHIFT) + order_objects(order, size) 402 }; 403 404 return x; 405 } 406 407 static inline unsigned int oo_order(struct kmem_cache_order_objects x) 408 { 409 return x.x >> OO_SHIFT; 410 } 411 412 static inline unsigned int oo_objects(struct kmem_cache_order_objects x) 413 { 414 return x.x & OO_MASK; 415 } 416 417 /* 418 * Per slab locking using the pagelock 419 */ 420 static __always_inline void __slab_lock(struct page *page) 421 { 422 VM_BUG_ON_PAGE(PageTail(page), page); 423 bit_spin_lock(PG_locked, &page->flags); 424 } 425 426 static __always_inline void __slab_unlock(struct page *page) 427 { 428 VM_BUG_ON_PAGE(PageTail(page), page); 429 __bit_spin_unlock(PG_locked, &page->flags); 430 } 431 432 static __always_inline void slab_lock(struct page *page, unsigned long *flags) 433 { 434 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 435 local_irq_save(*flags); 436 __slab_lock(page); 437 } 438 439 static __always_inline void slab_unlock(struct page *page, unsigned long *flags) 440 { 441 __slab_unlock(page); 442 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 443 local_irq_restore(*flags); 444 } 445 446 /* 447 * Interrupts must be disabled (for the fallback code to work right), typically 448 * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different 449 * so we disable interrupts as part of slab_[un]lock(). 
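 *
 * The fallback emulates the double-word cmpxchg under slab_lock(): the new
 * freelist and counters are only installed if both page->freelist and
 * page->counters still hold the expected old values; otherwise the caller
 * retries.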
450 */ 451 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 452 void *freelist_old, unsigned long counters_old, 453 void *freelist_new, unsigned long counters_new, 454 const char *n) 455 { 456 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) 457 lockdep_assert_irqs_disabled(); 458 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 459 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 460 if (s->flags & __CMPXCHG_DOUBLE) { 461 if (cmpxchg_double(&page->freelist, &page->counters, 462 freelist_old, counters_old, 463 freelist_new, counters_new)) 464 return true; 465 } else 466 #endif 467 { 468 /* init to 0 to prevent spurious warnings */ 469 unsigned long flags = 0; 470 471 slab_lock(page, &flags); 472 if (page->freelist == freelist_old && 473 page->counters == counters_old) { 474 page->freelist = freelist_new; 475 page->counters = counters_new; 476 slab_unlock(page, &flags); 477 return true; 478 } 479 slab_unlock(page, &flags); 480 } 481 482 cpu_relax(); 483 stat(s, CMPXCHG_DOUBLE_FAIL); 484 485 #ifdef SLUB_DEBUG_CMPXCHG 486 pr_info("%s %s: cmpxchg double redo ", n, s->name); 487 #endif 488 489 return false; 490 } 491 492 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, 493 void *freelist_old, unsigned long counters_old, 494 void *freelist_new, unsigned long counters_new, 495 const char *n) 496 { 497 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 498 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 499 if (s->flags & __CMPXCHG_DOUBLE) { 500 if (cmpxchg_double(&page->freelist, &page->counters, 501 freelist_old, counters_old, 502 freelist_new, counters_new)) 503 return true; 504 } else 505 #endif 506 { 507 unsigned long flags; 508 509 local_irq_save(flags); 510 __slab_lock(page); 511 if (page->freelist == freelist_old && 512 page->counters == counters_old) { 513 page->freelist = freelist_new; 514 page->counters = counters_new; 515 __slab_unlock(page); 516 local_irq_restore(flags); 517 return true; 518 } 519 __slab_unlock(page); 520 local_irq_restore(flags); 521 } 522 523 cpu_relax(); 524 stat(s, CMPXCHG_DOUBLE_FAIL); 525 526 #ifdef SLUB_DEBUG_CMPXCHG 527 pr_info("%s %s: cmpxchg double redo ", n, s->name); 528 #endif 529 530 return false; 531 } 532 533 #ifdef CONFIG_SLUB_DEBUG 534 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; 535 static DEFINE_RAW_SPINLOCK(object_map_lock); 536 537 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, 538 struct page *page) 539 { 540 void *addr = page_address(page); 541 void *p; 542 543 bitmap_zero(obj_map, page->objects); 544 545 for (p = page->freelist; p; p = get_freepointer(s, p)) 546 set_bit(__obj_to_index(s, addr, p), obj_map); 547 } 548 549 #if IS_ENABLED(CONFIG_KUNIT) 550 static bool slab_add_kunit_errors(void) 551 { 552 struct kunit_resource *resource; 553 554 if (likely(!current->kunit_test)) 555 return false; 556 557 resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 558 if (!resource) 559 return false; 560 561 (*(int *)resource->data)++; 562 kunit_put_resource(resource); 563 return true; 564 } 565 #else 566 static inline bool slab_add_kunit_errors(void) { return false; } 567 #endif 568 569 /* 570 * Determine a map of object in use on a page. 571 * 572 * Node listlock must be held to guarantee that the page does 573 * not vanish from under us. 
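 *
 * The returned bitmap has a bit set for every object currently on the
 * page's freelist (see __fill_map() above); objects whose bit is clear are
 * in use.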
574 */ 575 static unsigned long *get_map(struct kmem_cache *s, struct page *page) 576 __acquires(&object_map_lock) 577 { 578 VM_BUG_ON(!irqs_disabled()); 579 580 raw_spin_lock(&object_map_lock); 581 582 __fill_map(object_map, s, page); 583 584 return object_map; 585 } 586 587 static void put_map(unsigned long *map) __releases(&object_map_lock) 588 { 589 VM_BUG_ON(map != object_map); 590 raw_spin_unlock(&object_map_lock); 591 } 592 593 static inline unsigned int size_from_object(struct kmem_cache *s) 594 { 595 if (s->flags & SLAB_RED_ZONE) 596 return s->size - s->red_left_pad; 597 598 return s->size; 599 } 600 601 static inline void *restore_red_left(struct kmem_cache *s, void *p) 602 { 603 if (s->flags & SLAB_RED_ZONE) 604 p -= s->red_left_pad; 605 606 return p; 607 } 608 609 /* 610 * Debug settings: 611 */ 612 #if defined(CONFIG_SLUB_DEBUG_ON) 613 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 614 #else 615 static slab_flags_t slub_debug; 616 #endif 617 618 static char *slub_debug_string; 619 static int disable_higher_order_debug; 620 621 /* 622 * slub is about to manipulate internal object metadata. This memory lies 623 * outside the range of the allocated object, so accessing it would normally 624 * be reported by kasan as a bounds error. metadata_access_enable() is used 625 * to tell kasan that these accesses are OK. 626 */ 627 static inline void metadata_access_enable(void) 628 { 629 kasan_disable_current(); 630 } 631 632 static inline void metadata_access_disable(void) 633 { 634 kasan_enable_current(); 635 } 636 637 /* 638 * Object debugging 639 */ 640 641 /* Verify that a pointer has an address that is valid within a slab page */ 642 static inline int check_valid_pointer(struct kmem_cache *s, 643 struct page *page, void *object) 644 { 645 void *base; 646 647 if (!object) 648 return 1; 649 650 base = page_address(page); 651 object = kasan_reset_tag(object); 652 object = restore_red_left(s, object); 653 if (object < base || object >= base + page->objects * s->size || 654 (object - base) % s->size) { 655 return 0; 656 } 657 658 return 1; 659 } 660 661 static void print_section(char *level, char *text, u8 *addr, 662 unsigned int length) 663 { 664 metadata_access_enable(); 665 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 666 16, 1, kasan_reset_tag((void *)addr), length, 1); 667 metadata_access_disable(); 668 } 669 670 /* 671 * See comment in calculate_sizes(). 672 */ 673 static inline bool freeptr_outside_object(struct kmem_cache *s) 674 { 675 return s->offset >= s->inuse; 676 } 677 678 /* 679 * Return offset of the end of info block which is inuse + free pointer if 680 * not overlapping with object. 
681 */ 682 static inline unsigned int get_info_end(struct kmem_cache *s) 683 { 684 if (freeptr_outside_object(s)) 685 return s->inuse + sizeof(void *); 686 else 687 return s->inuse; 688 } 689 690 static struct track *get_track(struct kmem_cache *s, void *object, 691 enum track_item alloc) 692 { 693 struct track *p; 694 695 p = object + get_info_end(s); 696 697 return kasan_reset_tag(p + alloc); 698 } 699 700 static void set_track(struct kmem_cache *s, void *object, 701 enum track_item alloc, unsigned long addr) 702 { 703 struct track *p = get_track(s, object, alloc); 704 705 if (addr) { 706 #ifdef CONFIG_STACKTRACE 707 unsigned int nr_entries; 708 709 metadata_access_enable(); 710 nr_entries = stack_trace_save(kasan_reset_tag(p->addrs), 711 TRACK_ADDRS_COUNT, 3); 712 metadata_access_disable(); 713 714 if (nr_entries < TRACK_ADDRS_COUNT) 715 p->addrs[nr_entries] = 0; 716 #endif 717 p->addr = addr; 718 p->cpu = smp_processor_id(); 719 p->pid = current->pid; 720 p->when = jiffies; 721 } else { 722 memset(p, 0, sizeof(struct track)); 723 } 724 } 725 726 static void init_tracking(struct kmem_cache *s, void *object) 727 { 728 if (!(s->flags & SLAB_STORE_USER)) 729 return; 730 731 set_track(s, object, TRACK_FREE, 0UL); 732 set_track(s, object, TRACK_ALLOC, 0UL); 733 } 734 735 static void print_track(const char *s, struct track *t, unsigned long pr_time) 736 { 737 if (!t->addr) 738 return; 739 740 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n", 741 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); 742 #ifdef CONFIG_STACKTRACE 743 { 744 int i; 745 for (i = 0; i < TRACK_ADDRS_COUNT; i++) 746 if (t->addrs[i]) 747 pr_err("\t%pS\n", (void *)t->addrs[i]); 748 else 749 break; 750 } 751 #endif 752 } 753 754 void print_tracking(struct kmem_cache *s, void *object) 755 { 756 unsigned long pr_time = jiffies; 757 if (!(s->flags & SLAB_STORE_USER)) 758 return; 759 760 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); 761 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); 762 } 763 764 static void print_page_info(struct page *page) 765 { 766 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n", 767 page, page->objects, page->inuse, page->freelist, 768 page->flags, &page->flags); 769 770 } 771 772 static void slab_bug(struct kmem_cache *s, char *fmt, ...) 773 { 774 struct va_format vaf; 775 va_list args; 776 777 va_start(args, fmt); 778 vaf.fmt = fmt; 779 vaf.va = &args; 780 pr_err("=============================================================================\n"); 781 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); 782 pr_err("-----------------------------------------------------------------------------\n\n"); 783 va_end(args); 784 } 785 786 __printf(2, 3) 787 static void slab_fix(struct kmem_cache *s, char *fmt, ...) 
788 { 789 struct va_format vaf; 790 va_list args; 791 792 if (slab_add_kunit_errors()) 793 return; 794 795 va_start(args, fmt); 796 vaf.fmt = fmt; 797 vaf.va = &args; 798 pr_err("FIX %s: %pV\n", s->name, &vaf); 799 va_end(args); 800 } 801 802 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, 803 void **freelist, void *nextfree) 804 { 805 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && 806 !check_valid_pointer(s, page, nextfree) && freelist) { 807 object_err(s, page, *freelist, "Freechain corrupt"); 808 *freelist = NULL; 809 slab_fix(s, "Isolate corrupted freechain"); 810 return true; 811 } 812 813 return false; 814 } 815 816 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) 817 { 818 unsigned int off; /* Offset of last byte */ 819 u8 *addr = page_address(page); 820 821 print_tracking(s, p); 822 823 print_page_info(page); 824 825 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n", 826 p, p - addr, get_freepointer(s, p)); 827 828 if (s->flags & SLAB_RED_ZONE) 829 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, 830 s->red_left_pad); 831 else if (p > addr + 16) 832 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); 833 834 print_section(KERN_ERR, "Object ", p, 835 min_t(unsigned int, s->object_size, PAGE_SIZE)); 836 if (s->flags & SLAB_RED_ZONE) 837 print_section(KERN_ERR, "Redzone ", p + s->object_size, 838 s->inuse - s->object_size); 839 840 off = get_info_end(s); 841 842 if (s->flags & SLAB_STORE_USER) 843 off += 2 * sizeof(struct track); 844 845 off += kasan_metadata_size(s); 846 847 if (off != size_from_object(s)) 848 /* Beginning of the filler is the free pointer */ 849 print_section(KERN_ERR, "Padding ", p + off, 850 size_from_object(s) - off); 851 852 dump_stack(); 853 } 854 855 void object_err(struct kmem_cache *s, struct page *page, 856 u8 *object, char *reason) 857 { 858 if (slab_add_kunit_errors()) 859 return; 860 861 slab_bug(s, "%s", reason); 862 print_trailer(s, page, object); 863 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 864 } 865 866 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, 867 const char *fmt, ...) 
868 { 869 va_list args; 870 char buf[100]; 871 872 if (slab_add_kunit_errors()) 873 return; 874 875 va_start(args, fmt); 876 vsnprintf(buf, sizeof(buf), fmt, args); 877 va_end(args); 878 slab_bug(s, "%s", buf); 879 print_page_info(page); 880 dump_stack(); 881 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 882 } 883 884 static void init_object(struct kmem_cache *s, void *object, u8 val) 885 { 886 u8 *p = kasan_reset_tag(object); 887 888 if (s->flags & SLAB_RED_ZONE) 889 memset(p - s->red_left_pad, val, s->red_left_pad); 890 891 if (s->flags & __OBJECT_POISON) { 892 memset(p, POISON_FREE, s->object_size - 1); 893 p[s->object_size - 1] = POISON_END; 894 } 895 896 if (s->flags & SLAB_RED_ZONE) 897 memset(p + s->object_size, val, s->inuse - s->object_size); 898 } 899 900 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, 901 void *from, void *to) 902 { 903 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); 904 memset(from, data, to - from); 905 } 906 907 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, 908 u8 *object, char *what, 909 u8 *start, unsigned int value, unsigned int bytes) 910 { 911 u8 *fault; 912 u8 *end; 913 u8 *addr = page_address(page); 914 915 metadata_access_enable(); 916 fault = memchr_inv(kasan_reset_tag(start), value, bytes); 917 metadata_access_disable(); 918 if (!fault) 919 return 1; 920 921 end = start + bytes; 922 while (end > fault && end[-1] == value) 923 end--; 924 925 if (slab_add_kunit_errors()) 926 goto skip_bug_print; 927 928 slab_bug(s, "%s overwritten", what); 929 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 930 fault, end - 1, fault - addr, 931 fault[0], value); 932 print_trailer(s, page, object); 933 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 934 935 skip_bug_print: 936 restore_bytes(s, what, value, fault, end); 937 return 0; 938 } 939 940 /* 941 * Object layout: 942 * 943 * object address 944 * Bytes of the object to be managed. 945 * If the freepointer may overlay the object then the free 946 * pointer is at the middle of the object. 947 * 948 * Poisoning uses 0x6b (POISON_FREE) and the last byte is 949 * 0xa5 (POISON_END) 950 * 951 * object + s->object_size 952 * Padding to reach word boundary. This is also used for Redzoning. 953 * Padding is extended by another word if Redzoning is enabled and 954 * object_size == inuse. 955 * 956 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with 957 * 0xcc (RED_ACTIVE) for objects in use. 958 * 959 * object + s->inuse 960 * Meta data starts here. 961 * 962 * A. Free pointer (if we cannot overwrite object on free) 963 * B. Tracking data for SLAB_STORE_USER 964 * C. Padding to reach required alignment boundary or at minimum 965 * one word if debugging is on to be able to detect writes 966 * before the word boundary. 967 * 968 * Padding is done using 0x5a (POISON_INUSE) 969 * 970 * object + s->size 971 * Nothing is used beyond s->size. 972 * 973 * If slabcaches are merged then the object_size and inuse boundaries are mostly 974 * ignored. And therefore no slab options that rely on these boundaries 975 * may be used with merged slabcaches. 
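 *
 * As a purely illustrative example (sizes picked arbitrarily), a cache with
 * a 24 byte object, red zoning, poisoning and SLAB_STORE_USER would be laid
 * out roughly as:
 *
 *	[left red zone][object, 24 bytes][right red zone up to s->inuse]
 *	[free pointer][track (alloc)][track (free)][padding up to s->size]
 *
 * with the free pointer placed outside the object because the object
 * contents must be preserved on free for the debug checks.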
976 */ 977 978 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 979 { 980 unsigned long off = get_info_end(s); /* The end of info */ 981 982 if (s->flags & SLAB_STORE_USER) 983 /* We also have user information there */ 984 off += 2 * sizeof(struct track); 985 986 off += kasan_metadata_size(s); 987 988 if (size_from_object(s) == off) 989 return 1; 990 991 return check_bytes_and_report(s, page, p, "Object padding", 992 p + off, POISON_INUSE, size_from_object(s) - off); 993 } 994 995 /* Check the pad bytes at the end of a slab page */ 996 static int slab_pad_check(struct kmem_cache *s, struct page *page) 997 { 998 u8 *start; 999 u8 *fault; 1000 u8 *end; 1001 u8 *pad; 1002 int length; 1003 int remainder; 1004 1005 if (!(s->flags & SLAB_POISON)) 1006 return 1; 1007 1008 start = page_address(page); 1009 length = page_size(page); 1010 end = start + length; 1011 remainder = length % s->size; 1012 if (!remainder) 1013 return 1; 1014 1015 pad = end - remainder; 1016 metadata_access_enable(); 1017 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder); 1018 metadata_access_disable(); 1019 if (!fault) 1020 return 1; 1021 while (end > fault && end[-1] == POISON_INUSE) 1022 end--; 1023 1024 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", 1025 fault, end - 1, fault - start); 1026 print_section(KERN_ERR, "Padding ", pad, remainder); 1027 1028 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); 1029 return 0; 1030 } 1031 1032 static int check_object(struct kmem_cache *s, struct page *page, 1033 void *object, u8 val) 1034 { 1035 u8 *p = object; 1036 u8 *endobject = object + s->object_size; 1037 1038 if (s->flags & SLAB_RED_ZONE) { 1039 if (!check_bytes_and_report(s, page, object, "Left Redzone", 1040 object - s->red_left_pad, val, s->red_left_pad)) 1041 return 0; 1042 1043 if (!check_bytes_and_report(s, page, object, "Right Redzone", 1044 endobject, val, s->inuse - s->object_size)) 1045 return 0; 1046 } else { 1047 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1048 check_bytes_and_report(s, page, p, "Alignment padding", 1049 endobject, POISON_INUSE, 1050 s->inuse - s->object_size); 1051 } 1052 } 1053 1054 if (s->flags & SLAB_POISON) { 1055 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && 1056 (!check_bytes_and_report(s, page, p, "Poison", p, 1057 POISON_FREE, s->object_size - 1) || 1058 !check_bytes_and_report(s, page, p, "End Poison", 1059 p + s->object_size - 1, POISON_END, 1))) 1060 return 0; 1061 /* 1062 * check_pad_bytes cleans up on its own. 1063 */ 1064 check_pad_bytes(s, page, p); 1065 } 1066 1067 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1068 /* 1069 * Object and freepointer overlap. Cannot check 1070 * freepointer while object is allocated. 1071 */ 1072 return 1; 1073 1074 /* Check free pointer validity */ 1075 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { 1076 object_err(s, page, p, "Freepointer corrupt"); 1077 /* 1078 * No choice but to zap it and thus lose the remainder 1079 * of the free objects in this slab. May cause 1080 * another error because the object count is now wrong. 
1081 */ 1082 set_freepointer(s, p, NULL); 1083 return 0; 1084 } 1085 return 1; 1086 } 1087 1088 static int check_slab(struct kmem_cache *s, struct page *page) 1089 { 1090 int maxobj; 1091 1092 if (!PageSlab(page)) { 1093 slab_err(s, page, "Not a valid slab page"); 1094 return 0; 1095 } 1096 1097 maxobj = order_objects(compound_order(page), s->size); 1098 if (page->objects > maxobj) { 1099 slab_err(s, page, "objects %u > max %u", 1100 page->objects, maxobj); 1101 return 0; 1102 } 1103 if (page->inuse > page->objects) { 1104 slab_err(s, page, "inuse %u > max %u", 1105 page->inuse, page->objects); 1106 return 0; 1107 } 1108 /* Slab_pad_check fixes things up after itself */ 1109 slab_pad_check(s, page); 1110 return 1; 1111 } 1112 1113 /* 1114 * Determine if a certain object on a page is on the freelist. Must hold the 1115 * slab lock to guarantee that the chains are in a consistent state. 1116 */ 1117 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) 1118 { 1119 int nr = 0; 1120 void *fp; 1121 void *object = NULL; 1122 int max_objects; 1123 1124 fp = page->freelist; 1125 while (fp && nr <= page->objects) { 1126 if (fp == search) 1127 return 1; 1128 if (!check_valid_pointer(s, page, fp)) { 1129 if (object) { 1130 object_err(s, page, object, 1131 "Freechain corrupt"); 1132 set_freepointer(s, object, NULL); 1133 } else { 1134 slab_err(s, page, "Freepointer corrupt"); 1135 page->freelist = NULL; 1136 page->inuse = page->objects; 1137 slab_fix(s, "Freelist cleared"); 1138 return 0; 1139 } 1140 break; 1141 } 1142 object = fp; 1143 fp = get_freepointer(s, object); 1144 nr++; 1145 } 1146 1147 max_objects = order_objects(compound_order(page), s->size); 1148 if (max_objects > MAX_OBJS_PER_PAGE) 1149 max_objects = MAX_OBJS_PER_PAGE; 1150 1151 if (page->objects != max_objects) { 1152 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", 1153 page->objects, max_objects); 1154 page->objects = max_objects; 1155 slab_fix(s, "Number of objects adjusted"); 1156 } 1157 if (page->inuse != page->objects - nr) { 1158 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", 1159 page->inuse, page->objects - nr); 1160 page->inuse = page->objects - nr; 1161 slab_fix(s, "Object count adjusted"); 1162 } 1163 return search == NULL; 1164 } 1165 1166 static void trace(struct kmem_cache *s, struct page *page, void *object, 1167 int alloc) 1168 { 1169 if (s->flags & SLAB_TRACE) { 1170 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 1171 s->name, 1172 alloc ? "alloc" : "free", 1173 object, page->inuse, 1174 page->freelist); 1175 1176 if (!alloc) 1177 print_section(KERN_INFO, "Object ", (void *)object, 1178 s->object_size); 1179 1180 dump_stack(); 1181 } 1182 } 1183 1184 /* 1185 * Tracking of fully allocated slabs for debugging purposes. 
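 *
 * Full slabs are only tracked when SLAB_STORE_USER is set; otherwise
 * add_full() and remove_full() below are no-ops.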
1186 */ 1187 static void add_full(struct kmem_cache *s, 1188 struct kmem_cache_node *n, struct page *page) 1189 { 1190 if (!(s->flags & SLAB_STORE_USER)) 1191 return; 1192 1193 lockdep_assert_held(&n->list_lock); 1194 list_add(&page->slab_list, &n->full); 1195 } 1196 1197 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1198 { 1199 if (!(s->flags & SLAB_STORE_USER)) 1200 return; 1201 1202 lockdep_assert_held(&n->list_lock); 1203 list_del(&page->slab_list); 1204 } 1205 1206 /* Tracking of the number of slabs for debugging purposes */ 1207 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1208 { 1209 struct kmem_cache_node *n = get_node(s, node); 1210 1211 return atomic_long_read(&n->nr_slabs); 1212 } 1213 1214 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1215 { 1216 return atomic_long_read(&n->nr_slabs); 1217 } 1218 1219 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) 1220 { 1221 struct kmem_cache_node *n = get_node(s, node); 1222 1223 /* 1224 * May be called early in order to allocate a slab for the 1225 * kmem_cache_node structure. Solve the chicken-egg 1226 * dilemma by deferring the increment of the count during 1227 * bootstrap (see early_kmem_cache_node_alloc). 1228 */ 1229 if (likely(n)) { 1230 atomic_long_inc(&n->nr_slabs); 1231 atomic_long_add(objects, &n->total_objects); 1232 } 1233 } 1234 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) 1235 { 1236 struct kmem_cache_node *n = get_node(s, node); 1237 1238 atomic_long_dec(&n->nr_slabs); 1239 atomic_long_sub(objects, &n->total_objects); 1240 } 1241 1242 /* Object debug checks for alloc/free paths */ 1243 static void setup_object_debug(struct kmem_cache *s, struct page *page, 1244 void *object) 1245 { 1246 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) 1247 return; 1248 1249 init_object(s, object, SLUB_RED_INACTIVE); 1250 init_tracking(s, object); 1251 } 1252 1253 static 1254 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) 1255 { 1256 if (!kmem_cache_debug_flags(s, SLAB_POISON)) 1257 return; 1258 1259 metadata_access_enable(); 1260 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page)); 1261 metadata_access_disable(); 1262 } 1263 1264 static inline int alloc_consistency_checks(struct kmem_cache *s, 1265 struct page *page, void *object) 1266 { 1267 if (!check_slab(s, page)) 1268 return 0; 1269 1270 if (!check_valid_pointer(s, page, object)) { 1271 object_err(s, page, object, "Freelist Pointer check fails"); 1272 return 0; 1273 } 1274 1275 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) 1276 return 0; 1277 1278 return 1; 1279 } 1280 1281 static noinline int alloc_debug_processing(struct kmem_cache *s, 1282 struct page *page, 1283 void *object, unsigned long addr) 1284 { 1285 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1286 if (!alloc_consistency_checks(s, page, object)) 1287 goto bad; 1288 } 1289 1290 /* Success perform special debug activities for allocs */ 1291 if (s->flags & SLAB_STORE_USER) 1292 set_track(s, object, TRACK_ALLOC, addr); 1293 trace(s, page, object, 1); 1294 init_object(s, object, SLUB_RED_ACTIVE); 1295 return 1; 1296 1297 bad: 1298 if (PageSlab(page)) { 1299 /* 1300 * If this is a slab page then lets do the best we can 1301 * to avoid issues in the future. Marking all objects 1302 * as used avoids touching the remaining objects. 
1303 */ 1304 slab_fix(s, "Marking all objects used"); 1305 page->inuse = page->objects; 1306 page->freelist = NULL; 1307 } 1308 return 0; 1309 } 1310 1311 static inline int free_consistency_checks(struct kmem_cache *s, 1312 struct page *page, void *object, unsigned long addr) 1313 { 1314 if (!check_valid_pointer(s, page, object)) { 1315 slab_err(s, page, "Invalid object pointer 0x%p", object); 1316 return 0; 1317 } 1318 1319 if (on_freelist(s, page, object)) { 1320 object_err(s, page, object, "Object already free"); 1321 return 0; 1322 } 1323 1324 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1325 return 0; 1326 1327 if (unlikely(s != page->slab_cache)) { 1328 if (!PageSlab(page)) { 1329 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", 1330 object); 1331 } else if (!page->slab_cache) { 1332 pr_err("SLUB <none>: no slab for object 0x%p.\n", 1333 object); 1334 dump_stack(); 1335 } else 1336 object_err(s, page, object, 1337 "page slab pointer corrupt."); 1338 return 0; 1339 } 1340 return 1; 1341 } 1342 1343 /* Supports checking bulk free of a constructed freelist */ 1344 static noinline int free_debug_processing( 1345 struct kmem_cache *s, struct page *page, 1346 void *head, void *tail, int bulk_cnt, 1347 unsigned long addr) 1348 { 1349 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1350 void *object = head; 1351 int cnt = 0; 1352 unsigned long flags, flags2; 1353 int ret = 0; 1354 1355 spin_lock_irqsave(&n->list_lock, flags); 1356 slab_lock(page, &flags2); 1357 1358 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1359 if (!check_slab(s, page)) 1360 goto out; 1361 } 1362 1363 next_object: 1364 cnt++; 1365 1366 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 1367 if (!free_consistency_checks(s, page, object, addr)) 1368 goto out; 1369 } 1370 1371 if (s->flags & SLAB_STORE_USER) 1372 set_track(s, object, TRACK_FREE, addr); 1373 trace(s, page, object, 0); 1374 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 1375 init_object(s, object, SLUB_RED_INACTIVE); 1376 1377 /* Reached end of constructed freelist yet? */ 1378 if (object != tail) { 1379 object = get_freepointer(s, object); 1380 goto next_object; 1381 } 1382 ret = 1; 1383 1384 out: 1385 if (cnt != bulk_cnt) 1386 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", 1387 bulk_cnt, cnt); 1388 1389 slab_unlock(page, &flags2); 1390 spin_unlock_irqrestore(&n->list_lock, flags); 1391 if (!ret) 1392 slab_fix(s, "Object at 0x%p not freed", object); 1393 return ret; 1394 } 1395 1396 /* 1397 * Parse a block of slub_debug options. Blocks are delimited by ';' 1398 * 1399 * @str: start of block 1400 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified 1401 * @slabs: return start of list of slabs, or NULL when there's no list 1402 * @init: assume this is initial parsing and not per-kmem-create parsing 1403 * 1404 * returns the start of next block if there's any, or NULL 1405 */ 1406 static char * 1407 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init) 1408 { 1409 bool higher_order_disable = false; 1410 1411 /* Skip any completely empty blocks */ 1412 while (*str && *str == ';') 1413 str++; 1414 1415 if (*str == ',') { 1416 /* 1417 * No options but restriction on slabs. This means full 1418 * debugging for slabs matching a pattern. 
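 *
 * (e.g. booting with slub_debug=,dentry would switch on the default debug
 * flags for the dentry cache only)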
1419 */ 1420 *flags = DEBUG_DEFAULT_FLAGS; 1421 goto check_slabs; 1422 } 1423 *flags = 0; 1424 1425 /* Determine which debug features should be switched on */ 1426 for (; *str && *str != ',' && *str != ';'; str++) { 1427 switch (tolower(*str)) { 1428 case '-': 1429 *flags = 0; 1430 break; 1431 case 'f': 1432 *flags |= SLAB_CONSISTENCY_CHECKS; 1433 break; 1434 case 'z': 1435 *flags |= SLAB_RED_ZONE; 1436 break; 1437 case 'p': 1438 *flags |= SLAB_POISON; 1439 break; 1440 case 'u': 1441 *flags |= SLAB_STORE_USER; 1442 break; 1443 case 't': 1444 *flags |= SLAB_TRACE; 1445 break; 1446 case 'a': 1447 *flags |= SLAB_FAILSLAB; 1448 break; 1449 case 'o': 1450 /* 1451 * Avoid enabling debugging on caches if its minimum 1452 * order would increase as a result. 1453 */ 1454 higher_order_disable = true; 1455 break; 1456 default: 1457 if (init) 1458 pr_err("slub_debug option '%c' unknown. skipped\n", *str); 1459 } 1460 } 1461 check_slabs: 1462 if (*str == ',') 1463 *slabs = ++str; 1464 else 1465 *slabs = NULL; 1466 1467 /* Skip over the slab list */ 1468 while (*str && *str != ';') 1469 str++; 1470 1471 /* Skip any completely empty blocks */ 1472 while (*str && *str == ';') 1473 str++; 1474 1475 if (init && higher_order_disable) 1476 disable_higher_order_debug = 1; 1477 1478 if (*str) 1479 return str; 1480 else 1481 return NULL; 1482 } 1483 1484 static int __init setup_slub_debug(char *str) 1485 { 1486 slab_flags_t flags; 1487 slab_flags_t global_flags; 1488 char *saved_str; 1489 char *slab_list; 1490 bool global_slub_debug_changed = false; 1491 bool slab_list_specified = false; 1492 1493 global_flags = DEBUG_DEFAULT_FLAGS; 1494 if (*str++ != '=' || !*str) 1495 /* 1496 * No options specified. Switch on full debugging. 1497 */ 1498 goto out; 1499 1500 saved_str = str; 1501 while (str) { 1502 str = parse_slub_debug_flags(str, &flags, &slab_list, true); 1503 1504 if (!slab_list) { 1505 global_flags = flags; 1506 global_slub_debug_changed = true; 1507 } else { 1508 slab_list_specified = true; 1509 } 1510 } 1511 1512 /* 1513 * For backwards compatibility, a single list of flags with list of 1514 * slabs means debugging is only changed for those slabs, so the global 1515 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending 1516 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as 1517 * long as there is no option specifying flags without a slab list. 1518 */ 1519 if (slab_list_specified) { 1520 if (!global_slub_debug_changed) 1521 global_flags = slub_debug; 1522 slub_debug_string = saved_str; 1523 } 1524 out: 1525 slub_debug = global_flags; 1526 if (slub_debug != 0 || slub_debug_string) 1527 static_branch_enable(&slub_debug_enabled); 1528 else 1529 static_branch_disable(&slub_debug_enabled); 1530 if ((static_branch_unlikely(&init_on_alloc) || 1531 static_branch_unlikely(&init_on_free)) && 1532 (slub_debug & SLAB_POISON)) 1533 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); 1534 return 1; 1535 } 1536 1537 __setup("slub_debug", setup_slub_debug); 1538 1539 /* 1540 * kmem_cache_flags - apply debugging options to the cache 1541 * @object_size: the size of an object without meta data 1542 * @flags: flags to set 1543 * @name: name of the cache 1544 * 1545 * Debug option(s) are applied to @flags. In addition to the debug 1546 * option(s), if a slab name (or multiple) is specified i.e. 1547 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ... 1548 * then only the select slabs will receive the debug option(s). 
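 *
 * The slab list also accepts a trailing '*' glob, so e.g.
 * slub_debug=P,kmalloc-* would apply SLAB_POISON to every cache whose name
 * starts with "kmalloc-".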
1549 */ 1550 slab_flags_t kmem_cache_flags(unsigned int object_size, 1551 slab_flags_t flags, const char *name) 1552 { 1553 char *iter; 1554 size_t len; 1555 char *next_block; 1556 slab_flags_t block_flags; 1557 slab_flags_t slub_debug_local = slub_debug; 1558 1559 /* 1560 * If the slab cache is for debugging (e.g. kmemleak) then 1561 * don't store user (stack trace) information by default, 1562 * but let the user enable it via the command line below. 1563 */ 1564 if (flags & SLAB_NOLEAKTRACE) 1565 slub_debug_local &= ~SLAB_STORE_USER; 1566 1567 len = strlen(name); 1568 next_block = slub_debug_string; 1569 /* Go through all blocks of debug options, see if any matches our slab's name */ 1570 while (next_block) { 1571 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false); 1572 if (!iter) 1573 continue; 1574 /* Found a block that has a slab list, search it */ 1575 while (*iter) { 1576 char *end, *glob; 1577 size_t cmplen; 1578 1579 end = strchrnul(iter, ','); 1580 if (next_block && next_block < end) 1581 end = next_block - 1; 1582 1583 glob = strnchr(iter, end - iter, '*'); 1584 if (glob) 1585 cmplen = glob - iter; 1586 else 1587 cmplen = max_t(size_t, len, (end - iter)); 1588 1589 if (!strncmp(name, iter, cmplen)) { 1590 flags |= block_flags; 1591 return flags; 1592 } 1593 1594 if (!*end || *end == ';') 1595 break; 1596 iter = end + 1; 1597 } 1598 } 1599 1600 return flags | slub_debug_local; 1601 } 1602 #else /* !CONFIG_SLUB_DEBUG */ 1603 static inline void setup_object_debug(struct kmem_cache *s, 1604 struct page *page, void *object) {} 1605 static inline 1606 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} 1607 1608 static inline int alloc_debug_processing(struct kmem_cache *s, 1609 struct page *page, void *object, unsigned long addr) { return 0; } 1610 1611 static inline int free_debug_processing( 1612 struct kmem_cache *s, struct page *page, 1613 void *head, void *tail, int bulk_cnt, 1614 unsigned long addr) { return 0; } 1615 1616 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1617 { return 1; } 1618 static inline int check_object(struct kmem_cache *s, struct page *page, 1619 void *object, u8 val) { return 1; } 1620 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, 1621 struct page *page) {} 1622 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1623 struct page *page) {} 1624 slab_flags_t kmem_cache_flags(unsigned int object_size, 1625 slab_flags_t flags, const char *name) 1626 { 1627 return flags; 1628 } 1629 #define slub_debug 0 1630 1631 #define disable_higher_order_debug 0 1632 1633 static inline unsigned long slabs_node(struct kmem_cache *s, int node) 1634 { return 0; } 1635 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) 1636 { return 0; } 1637 static inline void inc_slabs_node(struct kmem_cache *s, int node, 1638 int objects) {} 1639 static inline void dec_slabs_node(struct kmem_cache *s, int node, 1640 int objects) {} 1641 1642 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, 1643 void **freelist, void *nextfree) 1644 { 1645 return false; 1646 } 1647 #endif /* CONFIG_SLUB_DEBUG */ 1648 1649 /* 1650 * Hooks for other subsystems that check memory allocations. In a typical 1651 * production configuration these hooks all should produce no code at all. 
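 *
 * The hooks below cover kmemleak, KASAN, KCSAN, debug_check_no_locks_freed()
 * and debug_check_no_obj_freed(); see slab_free_hook() for the free-side
 * calls.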
1652 */ 1653 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) 1654 { 1655 ptr = kasan_kmalloc_large(ptr, size, flags); 1656 /* As ptr might get tagged, call kmemleak hook after KASAN. */ 1657 kmemleak_alloc(ptr, size, 1, flags); 1658 return ptr; 1659 } 1660 1661 static __always_inline void kfree_hook(void *x) 1662 { 1663 kmemleak_free(x); 1664 kasan_kfree_large(x); 1665 } 1666 1667 static __always_inline bool slab_free_hook(struct kmem_cache *s, 1668 void *x, bool init) 1669 { 1670 kmemleak_free_recursive(x, s->flags); 1671 1672 debug_check_no_locks_freed(x, s->object_size); 1673 1674 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1675 debug_check_no_obj_freed(x, s->object_size); 1676 1677 /* Use KCSAN to help debug racy use-after-free. */ 1678 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 1679 __kcsan_check_access(x, s->object_size, 1680 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 1681 1682 /* 1683 * As memory initialization might be integrated into KASAN, 1684 * kasan_slab_free and initialization memset's must be 1685 * kept together to avoid discrepancies in behavior. 1686 * 1687 * The initialization memset's clear the object and the metadata, 1688 * but don't touch the SLAB redzone. 1689 */ 1690 if (init) { 1691 int rsize; 1692 1693 if (!kasan_has_integrated_init()) 1694 memset(kasan_reset_tag(x), 0, s->object_size); 1695 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 1696 memset((char *)kasan_reset_tag(x) + s->inuse, 0, 1697 s->size - s->inuse - rsize); 1698 } 1699 /* KASAN might put x into memory quarantine, delaying its reuse. */ 1700 return kasan_slab_free(s, x, init); 1701 } 1702 1703 static inline bool slab_free_freelist_hook(struct kmem_cache *s, 1704 void **head, void **tail) 1705 { 1706 1707 void *object; 1708 void *next = *head; 1709 void *old_tail = *tail ? 
*tail : *head; 1710 1711 if (is_kfence_address(next)) { 1712 slab_free_hook(s, next, false); 1713 return true; 1714 } 1715 1716 /* Head and tail of the reconstructed freelist */ 1717 *head = NULL; 1718 *tail = NULL; 1719 1720 do { 1721 object = next; 1722 next = get_freepointer(s, object); 1723 1724 /* If object's reuse doesn't have to be delayed */ 1725 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { 1726 /* Move object to the new freelist */ 1727 set_freepointer(s, object, *head); 1728 *head = object; 1729 if (!*tail) 1730 *tail = object; 1731 } 1732 } while (object != old_tail); 1733 1734 if (*head == *tail) 1735 *tail = NULL; 1736 1737 return *head != NULL; 1738 } 1739 1740 static void *setup_object(struct kmem_cache *s, struct page *page, 1741 void *object) 1742 { 1743 setup_object_debug(s, page, object); 1744 object = kasan_init_slab_obj(s, object); 1745 if (unlikely(s->ctor)) { 1746 kasan_unpoison_object_data(s, object); 1747 s->ctor(object); 1748 kasan_poison_object_data(s, object); 1749 } 1750 return object; 1751 } 1752 1753 /* 1754 * Slab allocation and freeing 1755 */ 1756 static inline struct page *alloc_slab_page(struct kmem_cache *s, 1757 gfp_t flags, int node, struct kmem_cache_order_objects oo) 1758 { 1759 struct page *page; 1760 unsigned int order = oo_order(oo); 1761 1762 if (node == NUMA_NO_NODE) 1763 page = alloc_pages(flags, order); 1764 else 1765 page = __alloc_pages_node(node, flags, order); 1766 1767 return page; 1768 } 1769 1770 #ifdef CONFIG_SLAB_FREELIST_RANDOM 1771 /* Pre-initialize the random sequence cache */ 1772 static int init_cache_random_seq(struct kmem_cache *s) 1773 { 1774 unsigned int count = oo_objects(s->oo); 1775 int err; 1776 1777 /* Bailout if already initialised */ 1778 if (s->random_seq) 1779 return 0; 1780 1781 err = cache_random_seq_create(s, count, GFP_KERNEL); 1782 if (err) { 1783 pr_err("SLUB: Unable to initialize free list for %s\n", 1784 s->name); 1785 return err; 1786 } 1787 1788 /* Transform to an offset on the set of pages */ 1789 if (s->random_seq) { 1790 unsigned int i; 1791 1792 for (i = 0; i < count; i++) 1793 s->random_seq[i] *= s->size; 1794 } 1795 return 0; 1796 } 1797 1798 /* Initialize each random sequence freelist per cache */ 1799 static void __init init_freelist_randomization(void) 1800 { 1801 struct kmem_cache *s; 1802 1803 mutex_lock(&slab_mutex); 1804 1805 list_for_each_entry(s, &slab_caches, list) 1806 init_cache_random_seq(s); 1807 1808 mutex_unlock(&slab_mutex); 1809 } 1810 1811 /* Get the next entry on the pre-computed freelist randomized */ 1812 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, 1813 unsigned long *pos, void *start, 1814 unsigned long page_limit, 1815 unsigned long freelist_count) 1816 { 1817 unsigned int idx; 1818 1819 /* 1820 * If the target page allocation failed, the number of objects on the 1821 * page might be smaller than the usual size defined by the cache. 
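 *
 * Indices from the precomputed sequence that fall at or beyond page_limit
 * are therefore skipped until one inside the page is found.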
1822 */ 1823 do { 1824 idx = s->random_seq[*pos]; 1825 *pos += 1; 1826 if (*pos >= freelist_count) 1827 *pos = 0; 1828 } while (unlikely(idx >= page_limit)); 1829 1830 return (char *)start + idx; 1831 } 1832 1833 /* Shuffle the single linked freelist based on a random pre-computed sequence */ 1834 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1835 { 1836 void *start; 1837 void *cur; 1838 void *next; 1839 unsigned long idx, pos, page_limit, freelist_count; 1840 1841 if (page->objects < 2 || !s->random_seq) 1842 return false; 1843 1844 freelist_count = oo_objects(s->oo); 1845 pos = get_random_int() % freelist_count; 1846 1847 page_limit = page->objects * s->size; 1848 start = fixup_red_left(s, page_address(page)); 1849 1850 /* First entry is used as the base of the freelist */ 1851 cur = next_freelist_entry(s, page, &pos, start, page_limit, 1852 freelist_count); 1853 cur = setup_object(s, page, cur); 1854 page->freelist = cur; 1855 1856 for (idx = 1; idx < page->objects; idx++) { 1857 next = next_freelist_entry(s, page, &pos, start, page_limit, 1858 freelist_count); 1859 next = setup_object(s, page, next); 1860 set_freepointer(s, cur, next); 1861 cur = next; 1862 } 1863 set_freepointer(s, cur, NULL); 1864 1865 return true; 1866 } 1867 #else 1868 static inline int init_cache_random_seq(struct kmem_cache *s) 1869 { 1870 return 0; 1871 } 1872 static inline void init_freelist_randomization(void) { } 1873 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) 1874 { 1875 return false; 1876 } 1877 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 1878 1879 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) 1880 { 1881 struct page *page; 1882 struct kmem_cache_order_objects oo = s->oo; 1883 gfp_t alloc_gfp; 1884 void *start, *p, *next; 1885 int idx; 1886 bool shuffle; 1887 1888 flags &= gfp_allowed_mask; 1889 1890 flags |= s->allocflags; 1891 1892 /* 1893 * Let the initial higher-order allocation fail under memory pressure 1894 * so we fall-back to the minimum order allocation. 1895 */ 1896 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 1897 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 1898 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 1899 1900 page = alloc_slab_page(s, alloc_gfp, node, oo); 1901 if (unlikely(!page)) { 1902 oo = s->min; 1903 alloc_gfp = flags; 1904 /* 1905 * Allocation may have failed due to fragmentation. 
1906 * Try a lower order alloc if possible 1907 */ 1908 page = alloc_slab_page(s, alloc_gfp, node, oo); 1909 if (unlikely(!page)) 1910 goto out; 1911 stat(s, ORDER_FALLBACK); 1912 } 1913 1914 page->objects = oo_objects(oo); 1915 1916 account_slab_page(page, oo_order(oo), s, flags); 1917 1918 page->slab_cache = s; 1919 __SetPageSlab(page); 1920 if (page_is_pfmemalloc(page)) 1921 SetPageSlabPfmemalloc(page); 1922 1923 kasan_poison_slab(page); 1924 1925 start = page_address(page); 1926 1927 setup_page_debug(s, page, start); 1928 1929 shuffle = shuffle_freelist(s, page); 1930 1931 if (!shuffle) { 1932 start = fixup_red_left(s, start); 1933 start = setup_object(s, page, start); 1934 page->freelist = start; 1935 for (idx = 0, p = start; idx < page->objects - 1; idx++) { 1936 next = p + s->size; 1937 next = setup_object(s, page, next); 1938 set_freepointer(s, p, next); 1939 p = next; 1940 } 1941 set_freepointer(s, p, NULL); 1942 } 1943 1944 page->inuse = page->objects; 1945 page->frozen = 1; 1946 1947 out: 1948 if (!page) 1949 return NULL; 1950 1951 inc_slabs_node(s, page_to_nid(page), page->objects); 1952 1953 return page; 1954 } 1955 1956 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) 1957 { 1958 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 1959 flags = kmalloc_fix_flags(flags); 1960 1961 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 1962 1963 return allocate_slab(s, 1964 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 1965 } 1966 1967 static void __free_slab(struct kmem_cache *s, struct page *page) 1968 { 1969 int order = compound_order(page); 1970 int pages = 1 << order; 1971 1972 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 1973 void *p; 1974 1975 slab_pad_check(s, page); 1976 for_each_object(p, s, page_address(page), 1977 page->objects) 1978 check_object(s, page, p, SLUB_RED_INACTIVE); 1979 } 1980 1981 __ClearPageSlabPfmemalloc(page); 1982 __ClearPageSlab(page); 1983 /* In union with page->mapping where page allocator expects NULL */ 1984 page->slab_cache = NULL; 1985 if (current->reclaim_state) 1986 current->reclaim_state->reclaimed_slab += pages; 1987 unaccount_slab_page(page, order, s); 1988 __free_pages(page, order); 1989 } 1990 1991 static void rcu_free_slab(struct rcu_head *h) 1992 { 1993 struct page *page = container_of(h, struct page, rcu_head); 1994 1995 __free_slab(page->slab_cache, page); 1996 } 1997 1998 static void free_slab(struct kmem_cache *s, struct page *page) 1999 { 2000 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { 2001 call_rcu(&page->rcu_head, rcu_free_slab); 2002 } else 2003 __free_slab(s, page); 2004 } 2005 2006 static void discard_slab(struct kmem_cache *s, struct page *page) 2007 { 2008 dec_slabs_node(s, page_to_nid(page), page->objects); 2009 free_slab(s, page); 2010 } 2011 2012 /* 2013 * Management of partially allocated slabs. 
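 *
 * Partial slabs live on the per-node n->partial list, protected by
 * n->list_lock; the tail argument decides whether a slab is queued at the
 * tail (DEACTIVATE_TO_TAIL) or at the head of that list.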
2014 */ 2015 static inline void 2016 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) 2017 { 2018 n->nr_partial++; 2019 if (tail == DEACTIVATE_TO_TAIL) 2020 list_add_tail(&page->slab_list, &n->partial); 2021 else 2022 list_add(&page->slab_list, &n->partial); 2023 } 2024 2025 static inline void add_partial(struct kmem_cache_node *n, 2026 struct page *page, int tail) 2027 { 2028 lockdep_assert_held(&n->list_lock); 2029 __add_partial(n, page, tail); 2030 } 2031 2032 static inline void remove_partial(struct kmem_cache_node *n, 2033 struct page *page) 2034 { 2035 lockdep_assert_held(&n->list_lock); 2036 list_del(&page->slab_list); 2037 n->nr_partial--; 2038 } 2039 2040 /* 2041 * Remove slab from the partial list, freeze it and 2042 * return the pointer to the freelist. 2043 * 2044 * Returns a list of objects or NULL if it fails. 2045 */ 2046 static inline void *acquire_slab(struct kmem_cache *s, 2047 struct kmem_cache_node *n, struct page *page, 2048 int mode, int *objects) 2049 { 2050 void *freelist; 2051 unsigned long counters; 2052 struct page new; 2053 2054 lockdep_assert_held(&n->list_lock); 2055 2056 /* 2057 * Zap the freelist and set the frozen bit. 2058 * The old freelist is the list of objects for the 2059 * per cpu allocation list. 2060 */ 2061 freelist = page->freelist; 2062 counters = page->counters; 2063 new.counters = counters; 2064 *objects = new.objects - new.inuse; 2065 if (mode) { 2066 new.inuse = page->objects; 2067 new.freelist = NULL; 2068 } else { 2069 new.freelist = freelist; 2070 } 2071 2072 VM_BUG_ON(new.frozen); 2073 new.frozen = 1; 2074 2075 if (!__cmpxchg_double_slab(s, page, 2076 freelist, counters, 2077 new.freelist, new.counters, 2078 "acquire_slab")) 2079 return NULL; 2080 2081 remove_partial(n, page); 2082 WARN_ON(!freelist); 2083 return freelist; 2084 } 2085 2086 #ifdef CONFIG_SLUB_CPU_PARTIAL 2087 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); 2088 #else 2089 static inline void put_cpu_partial(struct kmem_cache *s, struct page *page, 2090 int drain) { } 2091 #endif 2092 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); 2093 2094 /* 2095 * Try to allocate a partial slab from a specific node. 2096 */ 2097 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, 2098 struct page **ret_page, gfp_t gfpflags) 2099 { 2100 struct page *page, *page2; 2101 void *object = NULL; 2102 unsigned int available = 0; 2103 unsigned long flags; 2104 int objects; 2105 2106 /* 2107 * Racy check. If we mistakenly see no partial slabs then we 2108 * just allocate an empty slab. If we mistakenly try to get a 2109 * partial slab and there is none available then get_partial() 2110 * will return NULL. 2111 */ 2112 if (!n || !n->nr_partial) 2113 return NULL; 2114 2115 spin_lock_irqsave(&n->list_lock, flags); 2116 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { 2117 void *t; 2118 2119 if (!pfmemalloc_match(page, gfpflags)) 2120 continue; 2121 2122 t = acquire_slab(s, n, page, object == NULL, &objects); 2123 if (!t) 2124 break; 2125 2126 available += objects; 2127 if (!object) { 2128 *ret_page = page; 2129 stat(s, ALLOC_FROM_PARTIAL); 2130 object = t; 2131 } else { 2132 put_cpu_partial(s, page, 0); 2133 stat(s, CPU_PARTIAL_NODE); 2134 } 2135 if (!kmem_cache_has_cpu_partial(s) 2136 || available > slub_cpu_partial(s) / 2) 2137 break; 2138 2139 } 2140 spin_unlock_irqrestore(&n->list_lock, flags); 2141 return object; 2142 } 2143 2144 /* 2145 * Get a page from somewhere. 
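 * This path is only reached from get_partial() when no specific node was
 * requested and the preferred node had no usable partial slab; the
 * remote_node_defrag_ratio check below additionally throttles how often
 * such a remote scan is attempted at all.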
Search in increasing NUMA distances. 2146 */ 2147 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, 2148 struct page **ret_page) 2149 { 2150 #ifdef CONFIG_NUMA 2151 struct zonelist *zonelist; 2152 struct zoneref *z; 2153 struct zone *zone; 2154 enum zone_type highest_zoneidx = gfp_zone(flags); 2155 void *object; 2156 unsigned int cpuset_mems_cookie; 2157 2158 /* 2159 * The defrag ratio allows a configuration of the tradeoffs between 2160 * inter node defragmentation and node local allocations. A lower 2161 * defrag_ratio increases the tendency to do local allocations 2162 * instead of attempting to obtain partial slabs from other nodes. 2163 * 2164 * If the defrag_ratio is set to 0 then kmalloc() always 2165 * returns node local objects. If the ratio is higher then kmalloc() 2166 * may return off node objects because partial slabs are obtained 2167 * from other nodes and filled up. 2168 * 2169 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2170 * (which makes defrag_ratio = 1000) then every (well almost) 2171 * allocation will first attempt to defrag slab caches on other nodes. 2172 * This means scanning over all nodes to look for partial slabs which 2173 * may be expensive if we do it every time we are trying to find a slab 2174 * with available objects. 2175 */ 2176 if (!s->remote_node_defrag_ratio || 2177 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2178 return NULL; 2179 2180 do { 2181 cpuset_mems_cookie = read_mems_allowed_begin(); 2182 zonelist = node_zonelist(mempolicy_slab_node(), flags); 2183 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2184 struct kmem_cache_node *n; 2185 2186 n = get_node(s, zone_to_nid(zone)); 2187 2188 if (n && cpuset_zone_allowed(zone, flags) && 2189 n->nr_partial > s->min_partial) { 2190 object = get_partial_node(s, n, ret_page, flags); 2191 if (object) { 2192 /* 2193 * Don't check read_mems_allowed_retry() 2194 * here - if mems_allowed was updated in 2195 * parallel, that was a harmless race 2196 * between allocation and the cpuset 2197 * update 2198 */ 2199 return object; 2200 } 2201 } 2202 } 2203 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2204 #endif /* CONFIG_NUMA */ 2205 return NULL; 2206 } 2207 2208 /* 2209 * Get a partial page, lock it and return it. 2210 */ 2211 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, 2212 struct page **ret_page) 2213 { 2214 void *object; 2215 int searchnode = node; 2216 2217 if (node == NUMA_NO_NODE) 2218 searchnode = numa_mem_id(); 2219 2220 object = get_partial_node(s, get_node(s, searchnode), ret_page, flags); 2221 if (object || node != NUMA_NO_NODE) 2222 return object; 2223 2224 return get_any_partial(s, flags, ret_page); 2225 } 2226 2227 #ifdef CONFIG_PREEMPTION 2228 /* 2229 * Calculate the next globally unique transaction for disambiguation 2230 * during cmpxchg. The transactions start with the cpu number and are then 2231 * incremented by CONFIG_NR_CPUS. 2232 */ 2233 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2234 #else 2235 /* 2236 * No preemption supported therefore also no need to check for 2237 * different cpus. 
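 * As a worked example (hypothetical config): with preemption enabled and
 * CONFIG_NR_CPUS = 4, TID_STEP is 4, so cpu 2's transaction ids run
 * 2, 6, 10, ...; tid % TID_STEP then recovers the cpu and tid / TID_STEP
 * counts how many operations that cpu has performed. Without preemption a
 * step of 1 suffices since a transaction cannot resume on another cpu.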
2238 */ 2239 #define TID_STEP 1 2240 #endif 2241 2242 static inline unsigned long next_tid(unsigned long tid) 2243 { 2244 return tid + TID_STEP; 2245 } 2246 2247 #ifdef SLUB_DEBUG_CMPXCHG 2248 static inline unsigned int tid_to_cpu(unsigned long tid) 2249 { 2250 return tid % TID_STEP; 2251 } 2252 2253 static inline unsigned long tid_to_event(unsigned long tid) 2254 { 2255 return tid / TID_STEP; 2256 } 2257 #endif 2258 2259 static inline unsigned int init_tid(int cpu) 2260 { 2261 return cpu; 2262 } 2263 2264 static inline void note_cmpxchg_failure(const char *n, 2265 const struct kmem_cache *s, unsigned long tid) 2266 { 2267 #ifdef SLUB_DEBUG_CMPXCHG 2268 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2269 2270 pr_info("%s %s: cmpxchg redo ", n, s->name); 2271 2272 #ifdef CONFIG_PREEMPTION 2273 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2274 pr_warn("due to cpu change %d -> %d\n", 2275 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2276 else 2277 #endif 2278 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2279 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2280 tid_to_event(tid), tid_to_event(actual_tid)); 2281 else 2282 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2283 actual_tid, tid, next_tid(tid)); 2284 #endif 2285 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2286 } 2287 2288 static void init_kmem_cache_cpus(struct kmem_cache *s) 2289 { 2290 int cpu; 2291 struct kmem_cache_cpu *c; 2292 2293 for_each_possible_cpu(cpu) { 2294 c = per_cpu_ptr(s->cpu_slab, cpu); 2295 local_lock_init(&c->lock); 2296 c->tid = init_tid(cpu); 2297 } 2298 } 2299 2300 /* 2301 * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist, 2302 * unfreezes the slabs and puts it on the proper list. 2303 * Assumes the slab has been already safely taken away from kmem_cache_cpu 2304 * by the caller. 2305 */ 2306 static void deactivate_slab(struct kmem_cache *s, struct page *page, 2307 void *freelist) 2308 { 2309 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; 2310 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 2311 int lock = 0, free_delta = 0; 2312 enum slab_modes l = M_NONE, m = M_NONE; 2313 void *nextfree, *freelist_iter, *freelist_tail; 2314 int tail = DEACTIVATE_TO_HEAD; 2315 unsigned long flags = 0; 2316 struct page new; 2317 struct page old; 2318 2319 if (page->freelist) { 2320 stat(s, DEACTIVATE_REMOTE_FREES); 2321 tail = DEACTIVATE_TO_TAIL; 2322 } 2323 2324 /* 2325 * Stage one: Count the objects on cpu's freelist as free_delta and 2326 * remember the last object in freelist_tail for later splicing. 2327 */ 2328 freelist_tail = NULL; 2329 freelist_iter = freelist; 2330 while (freelist_iter) { 2331 nextfree = get_freepointer(s, freelist_iter); 2332 2333 /* 2334 * If 'nextfree' is invalid, it is possible that the object at 2335 * 'freelist_iter' is already corrupted. So isolate all objects 2336 * starting at 'freelist_iter' by skipping them. 2337 */ 2338 if (freelist_corrupted(s, page, &freelist_iter, nextfree)) 2339 break; 2340 2341 freelist_tail = freelist_iter; 2342 free_delta++; 2343 2344 freelist_iter = nextfree; 2345 } 2346 2347 /* 2348 * Stage two: Unfreeze the page while splicing the per-cpu 2349 * freelist to the head of page's freelist. 2350 * 2351 * Ensure that the page is unfrozen while the list presence 2352 * reflects the actual number of objects during unfreeze. 2353 * 2354 * We setup the list membership and then perform a cmpxchg 2355 * with the count. 
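 * (Concretely, the cmpxchg can fail because a remote cpu freed an object
 * and changed page->counters after our READ_ONCE of them; the retry may
 * then classify the slab differently, e.g. as M_FREE instead of
 * M_PARTIAL.)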
If there is a mismatch then the page 2356 * is not unfrozen but the page is on the wrong list. 2357 * 2358 * Then we restart the process which may have to remove 2359 * the page from the list that we just put it on again 2360 * because the number of objects in the slab may have 2361 * changed. 2362 */ 2363 redo: 2364 2365 old.freelist = READ_ONCE(page->freelist); 2366 old.counters = READ_ONCE(page->counters); 2367 VM_BUG_ON(!old.frozen); 2368 2369 /* Determine target state of the slab */ 2370 new.counters = old.counters; 2371 if (freelist_tail) { 2372 new.inuse -= free_delta; 2373 set_freepointer(s, freelist_tail, old.freelist); 2374 new.freelist = freelist; 2375 } else 2376 new.freelist = old.freelist; 2377 2378 new.frozen = 0; 2379 2380 if (!new.inuse && n->nr_partial >= s->min_partial) 2381 m = M_FREE; 2382 else if (new.freelist) { 2383 m = M_PARTIAL; 2384 if (!lock) { 2385 lock = 1; 2386 /* 2387 * Taking the spinlock removes the possibility 2388 * that acquire_slab() will see a slab page that 2389 * is frozen 2390 */ 2391 spin_lock_irqsave(&n->list_lock, flags); 2392 } 2393 } else { 2394 m = M_FULL; 2395 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) { 2396 lock = 1; 2397 /* 2398 * This also ensures that the scanning of full 2399 * slabs from diagnostic functions will not see 2400 * any frozen slabs. 2401 */ 2402 spin_lock_irqsave(&n->list_lock, flags); 2403 } 2404 } 2405 2406 if (l != m) { 2407 if (l == M_PARTIAL) 2408 remove_partial(n, page); 2409 else if (l == M_FULL) 2410 remove_full(s, n, page); 2411 2412 if (m == M_PARTIAL) 2413 add_partial(n, page, tail); 2414 else if (m == M_FULL) 2415 add_full(s, n, page); 2416 } 2417 2418 l = m; 2419 if (!cmpxchg_double_slab(s, page, 2420 old.freelist, old.counters, 2421 new.freelist, new.counters, 2422 "unfreezing slab")) 2423 goto redo; 2424 2425 if (lock) 2426 spin_unlock_irqrestore(&n->list_lock, flags); 2427 2428 if (m == M_PARTIAL) 2429 stat(s, tail); 2430 else if (m == M_FULL) 2431 stat(s, DEACTIVATE_FULL); 2432 else if (m == M_FREE) { 2433 stat(s, DEACTIVATE_EMPTY); 2434 discard_slab(s, page); 2435 stat(s, FREE_SLAB); 2436 } 2437 } 2438 2439 #ifdef CONFIG_SLUB_CPU_PARTIAL 2440 static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page) 2441 { 2442 struct kmem_cache_node *n = NULL, *n2 = NULL; 2443 struct page *page, *discard_page = NULL; 2444 unsigned long flags = 0; 2445 2446 while (partial_page) { 2447 struct page new; 2448 struct page old; 2449 2450 page = partial_page; 2451 partial_page = page->next; 2452 2453 n2 = get_node(s, page_to_nid(page)); 2454 if (n != n2) { 2455 if (n) 2456 spin_unlock_irqrestore(&n->list_lock, flags); 2457 2458 n = n2; 2459 spin_lock_irqsave(&n->list_lock, flags); 2460 } 2461 2462 do { 2463 2464 old.freelist = page->freelist; 2465 old.counters = page->counters; 2466 VM_BUG_ON(!old.frozen); 2467 2468 new.counters = old.counters; 2469 new.freelist = old.freelist; 2470 2471 new.frozen = 0; 2472 2473 } while (!__cmpxchg_double_slab(s, page, 2474 old.freelist, old.counters, 2475 new.freelist, new.counters, 2476 "unfreezing slab")); 2477 2478 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { 2479 page->next = discard_page; 2480 discard_page = page; 2481 } else { 2482 add_partial(n, page, DEACTIVATE_TO_TAIL); 2483 stat(s, FREE_ADD_PARTIAL); 2484 } 2485 } 2486 2487 if (n) 2488 spin_unlock_irqrestore(&n->list_lock, flags); 2489 2490 while (discard_page) { 2491 page = discard_page; 2492 discard_page = discard_page->next; 2493 2494 stat(s, DEACTIVATE_EMPTY); 2495 
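/*
 * Slabs on the discard list were found empty while their node already
 * held at least min_partial slabs, so rather than parking them on the
 * node partial list we hand them back to the page allocator.
 */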
discard_slab(s, page); 2496 stat(s, FREE_SLAB); 2497 } 2498 } 2499 2500 /* 2501 * Unfreeze all the cpu partial slabs. 2502 */ 2503 static void unfreeze_partials(struct kmem_cache *s) 2504 { 2505 struct page *partial_page; 2506 unsigned long flags; 2507 2508 local_lock_irqsave(&s->cpu_slab->lock, flags); 2509 partial_page = this_cpu_read(s->cpu_slab->partial); 2510 this_cpu_write(s->cpu_slab->partial, NULL); 2511 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2512 2513 if (partial_page) 2514 __unfreeze_partials(s, partial_page); 2515 } 2516 2517 static void unfreeze_partials_cpu(struct kmem_cache *s, 2518 struct kmem_cache_cpu *c) 2519 { 2520 struct page *partial_page; 2521 2522 partial_page = slub_percpu_partial(c); 2523 c->partial = NULL; 2524 2525 if (partial_page) 2526 __unfreeze_partials(s, partial_page); 2527 } 2528 2529 /* 2530 * Put a page that was just frozen (in __slab_free|get_partial_node) into a 2531 * partial page slot if available. 2532 * 2533 * If we did not find a slot then simply move all the partials to the 2534 * per node partial list. 2535 */ 2536 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) 2537 { 2538 struct page *oldpage; 2539 struct page *page_to_unfreeze = NULL; 2540 unsigned long flags; 2541 int pages = 0; 2542 int pobjects = 0; 2543 2544 local_lock_irqsave(&s->cpu_slab->lock, flags); 2545 2546 oldpage = this_cpu_read(s->cpu_slab->partial); 2547 2548 if (oldpage) { 2549 if (drain && oldpage->pobjects > slub_cpu_partial(s)) { 2550 /* 2551 * Partial array is full. Move the existing set to the 2552 * per node partial list. Postpone the actual unfreezing 2553 * outside of the critical section. 2554 */ 2555 page_to_unfreeze = oldpage; 2556 oldpage = NULL; 2557 } else { 2558 pobjects = oldpage->pobjects; 2559 pages = oldpage->pages; 2560 } 2561 } 2562 2563 pages++; 2564 pobjects += page->objects - page->inuse; 2565 2566 page->pages = pages; 2567 page->pobjects = pobjects; 2568 page->next = oldpage; 2569 2570 this_cpu_write(s->cpu_slab->partial, page); 2571 2572 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2573 2574 if (page_to_unfreeze) { 2575 __unfreeze_partials(s, page_to_unfreeze); 2576 stat(s, CPU_PARTIAL_DRAIN); 2577 } 2578 } 2579 2580 #else /* CONFIG_SLUB_CPU_PARTIAL */ 2581 2582 static inline void unfreeze_partials(struct kmem_cache *s) { } 2583 static inline void unfreeze_partials_cpu(struct kmem_cache *s, 2584 struct kmem_cache_cpu *c) { } 2585 2586 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 2587 2588 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 2589 { 2590 unsigned long flags; 2591 struct page *page; 2592 void *freelist; 2593 2594 local_lock_irqsave(&s->cpu_slab->lock, flags); 2595 2596 page = c->page; 2597 freelist = c->freelist; 2598 2599 c->page = NULL; 2600 c->freelist = NULL; 2601 c->tid = next_tid(c->tid); 2602 2603 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2604 2605 if (page) { 2606 deactivate_slab(s, page, freelist); 2607 stat(s, CPUSLAB_FLUSH); 2608 } 2609 } 2610 2611 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 2612 { 2613 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2614 void *freelist = c->freelist; 2615 struct page *page = c->page; 2616 2617 c->page = NULL; 2618 c->freelist = NULL; 2619 c->tid = next_tid(c->tid); 2620 2621 if (page) { 2622 deactivate_slab(s, page, freelist); 2623 stat(s, CPUSLAB_FLUSH); 2624 } 2625 2626 unfreeze_partials_cpu(s, c); 2627 } 2628 2629 struct slub_flush_work { 2630 struct work_struct work; 2631 struct 
kmem_cache *s; 2632 bool skip; 2633 }; 2634 2635 /* 2636 * Flush cpu slab. 2637 * 2638 * Called from CPU work handler with migration disabled. 2639 */ 2640 static void flush_cpu_slab(struct work_struct *w) 2641 { 2642 struct kmem_cache *s; 2643 struct kmem_cache_cpu *c; 2644 struct slub_flush_work *sfw; 2645 2646 sfw = container_of(w, struct slub_flush_work, work); 2647 2648 s = sfw->s; 2649 c = this_cpu_ptr(s->cpu_slab); 2650 2651 if (c->page) 2652 flush_slab(s, c); 2653 2654 unfreeze_partials(s); 2655 } 2656 2657 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 2658 { 2659 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 2660 2661 return c->page || slub_percpu_partial(c); 2662 } 2663 2664 static DEFINE_MUTEX(flush_lock); 2665 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 2666 2667 static void flush_all_cpus_locked(struct kmem_cache *s) 2668 { 2669 struct slub_flush_work *sfw; 2670 unsigned int cpu; 2671 2672 lockdep_assert_cpus_held(); 2673 mutex_lock(&flush_lock); 2674 2675 for_each_online_cpu(cpu) { 2676 sfw = &per_cpu(slub_flush, cpu); 2677 if (!has_cpu_slab(cpu, s)) { 2678 sfw->skip = true; 2679 continue; 2680 } 2681 INIT_WORK(&sfw->work, flush_cpu_slab); 2682 sfw->skip = false; 2683 sfw->s = s; 2684 schedule_work_on(cpu, &sfw->work); 2685 } 2686 2687 for_each_online_cpu(cpu) { 2688 sfw = &per_cpu(slub_flush, cpu); 2689 if (sfw->skip) 2690 continue; 2691 flush_work(&sfw->work); 2692 } 2693 2694 mutex_unlock(&flush_lock); 2695 } 2696 2697 static void flush_all(struct kmem_cache *s) 2698 { 2699 cpus_read_lock(); 2700 flush_all_cpus_locked(s); 2701 cpus_read_unlock(); 2702 } 2703 2704 /* 2705 * Use the cpu notifier to insure that the cpu slabs are flushed when 2706 * necessary. 2707 */ 2708 static int slub_cpu_dead(unsigned int cpu) 2709 { 2710 struct kmem_cache *s; 2711 2712 mutex_lock(&slab_mutex); 2713 list_for_each_entry(s, &slab_caches, list) 2714 __flush_cpu_slab(s, cpu); 2715 mutex_unlock(&slab_mutex); 2716 return 0; 2717 } 2718 2719 /* 2720 * Check if the objects in a per cpu structure fit numa 2721 * locality expectations. 
2722 */ 2723 static inline int node_match(struct page *page, int node) 2724 { 2725 #ifdef CONFIG_NUMA 2726 if (node != NUMA_NO_NODE && page_to_nid(page) != node) 2727 return 0; 2728 #endif 2729 return 1; 2730 } 2731 2732 #ifdef CONFIG_SLUB_DEBUG 2733 static int count_free(struct page *page) 2734 { 2735 return page->objects - page->inuse; 2736 } 2737 2738 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 2739 { 2740 return atomic_long_read(&n->total_objects); 2741 } 2742 #endif /* CONFIG_SLUB_DEBUG */ 2743 2744 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) 2745 static unsigned long count_partial(struct kmem_cache_node *n, 2746 int (*get_count)(struct page *)) 2747 { 2748 unsigned long flags; 2749 unsigned long x = 0; 2750 struct page *page; 2751 2752 spin_lock_irqsave(&n->list_lock, flags); 2753 list_for_each_entry(page, &n->partial, slab_list) 2754 x += get_count(page); 2755 spin_unlock_irqrestore(&n->list_lock, flags); 2756 return x; 2757 } 2758 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ 2759 2760 static noinline void 2761 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 2762 { 2763 #ifdef CONFIG_SLUB_DEBUG 2764 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 2765 DEFAULT_RATELIMIT_BURST); 2766 int node; 2767 struct kmem_cache_node *n; 2768 2769 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 2770 return; 2771 2772 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 2773 nid, gfpflags, &gfpflags); 2774 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 2775 s->name, s->object_size, s->size, oo_order(s->oo), 2776 oo_order(s->min)); 2777 2778 if (oo_order(s->min) > get_order(s->object_size)) 2779 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n", 2780 s->name); 2781 2782 for_each_kmem_cache_node(s, node, n) { 2783 unsigned long nr_slabs; 2784 unsigned long nr_objs; 2785 unsigned long nr_free; 2786 2787 nr_free = count_partial(n, count_free); 2788 nr_slabs = node_nr_slabs(n); 2789 nr_objs = node_nr_objs(n); 2790 2791 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 2792 node, nr_slabs, nr_objs, nr_free); 2793 } 2794 #endif 2795 } 2796 2797 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) 2798 { 2799 if (unlikely(PageSlabPfmemalloc(page))) 2800 return gfp_pfmemalloc_allowed(gfpflags); 2801 2802 return true; 2803 } 2804 2805 /* 2806 * A variant of pfmemalloc_match() that tests page flags without asserting 2807 * PageSlab. Intended for opportunistic checks before taking a lock and 2808 * rechecking that nobody else freed the page under us. 2809 */ 2810 static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags) 2811 { 2812 if (unlikely(__PageSlabPfmemalloc(page))) 2813 return gfp_pfmemalloc_allowed(gfpflags); 2814 2815 return true; 2816 } 2817 2818 /* 2819 * Check the page->freelist of a page and either transfer the freelist to the 2820 * per cpu freelist or deactivate the page. 2821 * 2822 * The page is still frozen if the return value is not NULL. 2823 * 2824 * If this function returns NULL then the page has been unfrozen. 
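 * For example, when objects were freed from other cpus into
 * page->freelist while the page was the cpu slab, those objects are taken
 * over here (page->freelist is zapped to NULL and the page stays frozen);
 * if nothing was freed, NULL is returned, the page becomes unfrozen and
 * the caller has to find a new slab.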
2825 */ 2826 static inline void *get_freelist(struct kmem_cache *s, struct page *page) 2827 { 2828 struct page new; 2829 unsigned long counters; 2830 void *freelist; 2831 2832 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 2833 2834 do { 2835 freelist = page->freelist; 2836 counters = page->counters; 2837 2838 new.counters = counters; 2839 VM_BUG_ON(!new.frozen); 2840 2841 new.inuse = page->objects; 2842 new.frozen = freelist != NULL; 2843 2844 } while (!__cmpxchg_double_slab(s, page, 2845 freelist, counters, 2846 NULL, new.counters, 2847 "get_freelist")); 2848 2849 return freelist; 2850 } 2851 2852 /* 2853 * Slow path. The lockless freelist is empty or we need to perform 2854 * debugging duties. 2855 * 2856 * Processing is still very fast if new objects have been freed to the 2857 * regular freelist. In that case we simply take over the regular freelist 2858 * as the lockless freelist and zap the regular freelist. 2859 * 2860 * If that is not working then we fall back to the partial lists. We take the 2861 * first element of the freelist as the object to allocate now and move the 2862 * rest of the freelist to the lockless freelist. 2863 * 2864 * And if we were unable to get a new slab from the partial slab lists then 2865 * we need to allocate a new slab. This is the slowest path since it involves 2866 * a call to the page allocator and the setup of a new slab. 2867 * 2868 * Version of __slab_alloc to use when we know that preemption is 2869 * already disabled (which is the case for bulk allocation). 2870 */ 2871 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2872 unsigned long addr, struct kmem_cache_cpu *c) 2873 { 2874 void *freelist; 2875 struct page *page; 2876 unsigned long flags; 2877 2878 stat(s, ALLOC_SLOWPATH); 2879 2880 reread_page: 2881 2882 page = READ_ONCE(c->page); 2883 if (!page) { 2884 /* 2885 * if the node is not online or has no normal memory, just 2886 * ignore the node constraint 2887 */ 2888 if (unlikely(node != NUMA_NO_NODE && 2889 !node_isset(node, slab_nodes))) 2890 node = NUMA_NO_NODE; 2891 goto new_slab; 2892 } 2893 redo: 2894 2895 if (unlikely(!node_match(page, node))) { 2896 /* 2897 * same as above but node_match() being false already 2898 * implies node != NUMA_NO_NODE 2899 */ 2900 if (!node_isset(node, slab_nodes)) { 2901 node = NUMA_NO_NODE; 2902 goto redo; 2903 } else { 2904 stat(s, ALLOC_NODE_MISMATCH); 2905 goto deactivate_slab; 2906 } 2907 } 2908 2909 /* 2910 * By rights, we should be searching for a slab page that was 2911 * PFMEMALLOC but right now, we are losing the pfmemalloc 2912 * information when the page leaves the per-cpu allocator 2913 */ 2914 if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags))) 2915 goto deactivate_slab; 2916 2917 /* must check again c->page in case we got preempted and it changed */ 2918 local_lock_irqsave(&s->cpu_slab->lock, flags); 2919 if (unlikely(page != c->page)) { 2920 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2921 goto reread_page; 2922 } 2923 freelist = c->freelist; 2924 if (freelist) 2925 goto load_freelist; 2926 2927 freelist = get_freelist(s, page); 2928 2929 if (!freelist) { 2930 c->page = NULL; 2931 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2932 stat(s, DEACTIVATE_BYPASS); 2933 goto new_slab; 2934 } 2935 2936 stat(s, ALLOC_REFILL); 2937 2938 load_freelist: 2939 2940 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 2941 2942 /* 2943 * freelist is pointing to the list of objects to be used. 
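 * (only the first object is actually returned to the caller; c->freelist
 * below is advanced to the second object via get_freepointer()).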
2944 * page is pointing to the page from which the objects are obtained. 2945 * That page must be frozen for per cpu allocations to work. 2946 */ 2947 VM_BUG_ON(!c->page->frozen); 2948 c->freelist = get_freepointer(s, freelist); 2949 c->tid = next_tid(c->tid); 2950 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2951 return freelist; 2952 2953 deactivate_slab: 2954 2955 local_lock_irqsave(&s->cpu_slab->lock, flags); 2956 if (page != c->page) { 2957 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2958 goto reread_page; 2959 } 2960 freelist = c->freelist; 2961 c->page = NULL; 2962 c->freelist = NULL; 2963 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2964 deactivate_slab(s, page, freelist); 2965 2966 new_slab: 2967 2968 if (slub_percpu_partial(c)) { 2969 local_lock_irqsave(&s->cpu_slab->lock, flags); 2970 if (unlikely(c->page)) { 2971 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2972 goto reread_page; 2973 } 2974 if (unlikely(!slub_percpu_partial(c))) { 2975 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2976 /* we were preempted and partial list got empty */ 2977 goto new_objects; 2978 } 2979 2980 page = c->page = slub_percpu_partial(c); 2981 slub_set_percpu_partial(c, page); 2982 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 2983 stat(s, CPU_PARTIAL_ALLOC); 2984 goto redo; 2985 } 2986 2987 new_objects: 2988 2989 freelist = get_partial(s, gfpflags, node, &page); 2990 if (freelist) 2991 goto check_new_page; 2992 2993 slub_put_cpu_ptr(s->cpu_slab); 2994 page = new_slab(s, gfpflags, node); 2995 c = slub_get_cpu_ptr(s->cpu_slab); 2996 2997 if (unlikely(!page)) { 2998 slab_out_of_memory(s, gfpflags, node); 2999 return NULL; 3000 } 3001 3002 /* 3003 * No other reference to the page yet so we can 3004 * muck around with it freely without cmpxchg 3005 */ 3006 freelist = page->freelist; 3007 page->freelist = NULL; 3008 3009 stat(s, ALLOC_SLAB); 3010 3011 check_new_page: 3012 3013 if (kmem_cache_debug(s)) { 3014 if (!alloc_debug_processing(s, page, freelist, addr)) { 3015 /* Slab failed checks. Next slab needed */ 3016 goto new_slab; 3017 } else { 3018 /* 3019 * For debug case, we don't load freelist so that all 3020 * allocations go through alloc_debug_processing() 3021 */ 3022 goto return_single; 3023 } 3024 } 3025 3026 if (unlikely(!pfmemalloc_match(page, gfpflags))) 3027 /* 3028 * For !pfmemalloc_match() case we don't load freelist so that 3029 * we don't make further mismatched allocations easier. 3030 */ 3031 goto return_single; 3032 3033 retry_load_page: 3034 3035 local_lock_irqsave(&s->cpu_slab->lock, flags); 3036 if (unlikely(c->page)) { 3037 void *flush_freelist = c->freelist; 3038 struct page *flush_page = c->page; 3039 3040 c->page = NULL; 3041 c->freelist = NULL; 3042 c->tid = next_tid(c->tid); 3043 3044 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3045 3046 deactivate_slab(s, flush_page, flush_freelist); 3047 3048 stat(s, CPUSLAB_FLUSH); 3049 3050 goto retry_load_page; 3051 } 3052 c->page = page; 3053 3054 goto load_freelist; 3055 3056 return_single: 3057 3058 deactivate_slab(s, page, get_freepointer(s, freelist)); 3059 return freelist; 3060 } 3061 3062 /* 3063 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3064 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3065 * pointer. 
3066 */ 3067 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3068 unsigned long addr, struct kmem_cache_cpu *c) 3069 { 3070 void *p; 3071 3072 #ifdef CONFIG_PREEMPT_COUNT 3073 /* 3074 * We may have been preempted and rescheduled on a different 3075 * cpu before disabling preemption. Need to reload cpu area 3076 * pointer. 3077 */ 3078 c = slub_get_cpu_ptr(s->cpu_slab); 3079 #endif 3080 3081 p = ___slab_alloc(s, gfpflags, node, addr, c); 3082 #ifdef CONFIG_PREEMPT_COUNT 3083 slub_put_cpu_ptr(s->cpu_slab); 3084 #endif 3085 return p; 3086 } 3087 3088 /* 3089 * If the object has been wiped upon free, make sure it's fully initialized by 3090 * zeroing out freelist pointer. 3091 */ 3092 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3093 void *obj) 3094 { 3095 if (unlikely(slab_want_init_on_free(s)) && obj) 3096 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3097 0, sizeof(void *)); 3098 } 3099 3100 /* 3101 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 3102 * have the fastpath folded into their functions. So no function call 3103 * overhead for requests that can be satisfied on the fastpath. 3104 * 3105 * The fastpath works by first checking if the lockless freelist can be used. 3106 * If not then __slab_alloc is called for slow processing. 3107 * 3108 * Otherwise we can simply pick the next object from the lockless free list. 3109 */ 3110 static __always_inline void *slab_alloc_node(struct kmem_cache *s, 3111 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3112 { 3113 void *object; 3114 struct kmem_cache_cpu *c; 3115 struct page *page; 3116 unsigned long tid; 3117 struct obj_cgroup *objcg = NULL; 3118 bool init = false; 3119 3120 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); 3121 if (!s) 3122 return NULL; 3123 3124 object = kfence_alloc(s, orig_size, gfpflags); 3125 if (unlikely(object)) 3126 goto out; 3127 3128 redo: 3129 /* 3130 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3131 * enabled. We may switch back and forth between cpus while 3132 * reading from one cpu area. That does not matter as long 3133 * as we end up on the original cpu again when doing the cmpxchg. 3134 * 3135 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3136 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3137 * the tid. If we are preempted and switched to another cpu between the 3138 * two reads, it's OK as the two are still associated with the same cpu 3139 * and cmpxchg later will validate the cpu. 3140 */ 3141 c = raw_cpu_ptr(s->cpu_slab); 3142 tid = READ_ONCE(c->tid); 3143 3144 /* 3145 * Irqless object alloc/free algorithm used here depends on sequence 3146 * of fetching cpu_slab's data. tid should be fetched before anything 3147 * on c to guarantee that object and page associated with previous tid 3148 * won't be used with current tid. If we fetch tid first, object and 3149 * page could be one associated with next tid and our alloc/free 3150 * request will be failed. In this case, we will retry. So, no problem. 3151 */ 3152 barrier(); 3153 3154 /* 3155 * The transaction ids are globally unique per cpu and per operation on 3156 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double 3157 * occurs on the right processor and that there was no operation on the 3158 * linked list in between. 
3159 */ 3160 3161 object = c->freelist; 3162 page = c->page; 3163 /* 3164 * We cannot use the lockless fastpath on PREEMPT_RT because if a 3165 * slowpath has taken the local_lock_irqsave(), it is not protected 3166 * against a fast path operation in an irq handler. So we need to take 3167 * the slow path which uses local_lock. It is still relatively fast if 3168 * there is a suitable cpu freelist. 3169 */ 3170 if (IS_ENABLED(CONFIG_PREEMPT_RT) || 3171 unlikely(!object || !page || !node_match(page, node))) { 3172 object = __slab_alloc(s, gfpflags, node, addr, c); 3173 } else { 3174 void *next_object = get_freepointer_safe(s, object); 3175 3176 /* 3177 * The cmpxchg will only match if there was no additional 3178 * operation and if we are on the right processor. 3179 * 3180 * The cmpxchg does the following atomically (without lock 3181 * semantics!) 3182 * 1. Relocate first pointer to the current per cpu area. 3183 * 2. Verify that tid and freelist have not been changed 3184 * 3. If they were not changed replace tid and freelist 3185 * 3186 * Since this is without lock semantics the protection is only 3187 * against code executing on this cpu *not* from access by 3188 * other cpus. 3189 */ 3190 if (unlikely(!this_cpu_cmpxchg_double( 3191 s->cpu_slab->freelist, s->cpu_slab->tid, 3192 object, tid, 3193 next_object, next_tid(tid)))) { 3194 3195 note_cmpxchg_failure("slab_alloc", s, tid); 3196 goto redo; 3197 } 3198 prefetch_freepointer(s, next_object); 3199 stat(s, ALLOC_FASTPATH); 3200 } 3201 3202 maybe_wipe_obj_freeptr(s, object); 3203 init = slab_want_init_on_alloc(gfpflags, s); 3204 3205 out: 3206 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); 3207 3208 return object; 3209 } 3210 3211 static __always_inline void *slab_alloc(struct kmem_cache *s, 3212 gfp_t gfpflags, unsigned long addr, size_t orig_size) 3213 { 3214 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); 3215 } 3216 3217 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) 3218 { 3219 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); 3220 3221 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, 3222 s->size, gfpflags); 3223 3224 return ret; 3225 } 3226 EXPORT_SYMBOL(kmem_cache_alloc); 3227 3228 #ifdef CONFIG_TRACING 3229 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) 3230 { 3231 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); 3232 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); 3233 ret = kasan_kmalloc(s, ret, size, gfpflags); 3234 return ret; 3235 } 3236 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3237 #endif 3238 3239 #ifdef CONFIG_NUMA 3240 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) 3241 { 3242 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); 3243 3244 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3245 s->object_size, s->size, gfpflags, node); 3246 3247 return ret; 3248 } 3249 EXPORT_SYMBOL(kmem_cache_alloc_node); 3250 3251 #ifdef CONFIG_TRACING 3252 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 3253 gfp_t gfpflags, 3254 int node, size_t size) 3255 { 3256 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); 3257 3258 trace_kmalloc_node(_RET_IP_, ret, 3259 size, s->size, gfpflags, node); 3260 3261 ret = kasan_kmalloc(s, ret, size, gfpflags); 3262 return ret; 3263 } 3264 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3265 #endif 3266 #endif /* CONFIG_NUMA */ 3267 3268 /* 3269 * Slow path handling. 
This may still be called frequently since objects 3270 * have a longer lifetime than the cpu slabs in most processing loads. 3271 * 3272 * So we still attempt to reduce cache line usage. Just take the slab 3273 * lock and free the item. If there is no additional partial page 3274 * handling required then we can return immediately. 3275 */ 3276 static void __slab_free(struct kmem_cache *s, struct page *page, 3277 void *head, void *tail, int cnt, 3278 unsigned long addr) 3279 3280 { 3281 void *prior; 3282 int was_frozen; 3283 struct page new; 3284 unsigned long counters; 3285 struct kmem_cache_node *n = NULL; 3286 unsigned long flags; 3287 3288 stat(s, FREE_SLOWPATH); 3289 3290 if (kfence_free(head)) 3291 return; 3292 3293 if (kmem_cache_debug(s) && 3294 !free_debug_processing(s, page, head, tail, cnt, addr)) 3295 return; 3296 3297 do { 3298 if (unlikely(n)) { 3299 spin_unlock_irqrestore(&n->list_lock, flags); 3300 n = NULL; 3301 } 3302 prior = page->freelist; 3303 counters = page->counters; 3304 set_freepointer(s, tail, prior); 3305 new.counters = counters; 3306 was_frozen = new.frozen; 3307 new.inuse -= cnt; 3308 if ((!new.inuse || !prior) && !was_frozen) { 3309 3310 if (kmem_cache_has_cpu_partial(s) && !prior) { 3311 3312 /* 3313 * Slab was on no list before and will be 3314 * partially empty 3315 * We can defer the list move and instead 3316 * freeze it. 3317 */ 3318 new.frozen = 1; 3319 3320 } else { /* Needs to be taken off a list */ 3321 3322 n = get_node(s, page_to_nid(page)); 3323 /* 3324 * Speculatively acquire the list_lock. 3325 * If the cmpxchg does not succeed then we may 3326 * drop the list_lock without any processing. 3327 * 3328 * Otherwise the list_lock will synchronize with 3329 * other processors updating the list of slabs. 3330 */ 3331 spin_lock_irqsave(&n->list_lock, flags); 3332 3333 } 3334 } 3335 3336 } while (!cmpxchg_double_slab(s, page, 3337 prior, counters, 3338 head, new.counters, 3339 "__slab_free")); 3340 3341 if (likely(!n)) { 3342 3343 if (likely(was_frozen)) { 3344 /* 3345 * The list lock was not taken therefore no list 3346 * activity can be necessary. 3347 */ 3348 stat(s, FREE_FROZEN); 3349 } else if (new.frozen) { 3350 /* 3351 * If we just froze the page then put it onto the 3352 * per cpu partial list. 3353 */ 3354 put_cpu_partial(s, page, 1); 3355 stat(s, CPU_PARTIAL_FREE); 3356 } 3357 3358 return; 3359 } 3360 3361 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 3362 goto slab_empty; 3363 3364 /* 3365 * Objects left in the slab. If it was not on the partial list before 3366 * then add it. 3367 */ 3368 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 3369 remove_full(s, n, page); 3370 add_partial(n, page, DEACTIVATE_TO_TAIL); 3371 stat(s, FREE_ADD_PARTIAL); 3372 } 3373 spin_unlock_irqrestore(&n->list_lock, flags); 3374 return; 3375 3376 slab_empty: 3377 if (prior) { 3378 /* 3379 * Slab on the partial list. 3380 */ 3381 remove_partial(n, page); 3382 stat(s, FREE_REMOVE_PARTIAL); 3383 } else { 3384 /* Slab must be on the full list */ 3385 remove_full(s, n, page); 3386 } 3387 3388 spin_unlock_irqrestore(&n->list_lock, flags); 3389 stat(s, FREE_SLAB); 3390 discard_slab(s, page); 3391 } 3392 3393 /* 3394 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 3395 * can perform fastpath freeing without additional function calls. 3396 * 3397 * The fastpath is only possible if we are freeing to the current cpu slab 3398 * of this processor. This typically the case if we have just allocated 3399 * the item before. 
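 * A hypothetical caller sequence illustrating this:
 *
 *	p = kmem_cache_alloc(s, GFP_KERNEL);
 *	...
 *	kmem_cache_free(s, p);
 *
 * If the task did not migrate and the cpu slab did not change in between,
 * the free only needs a single this_cpu_cmpxchg_double() to push p back
 * onto the head of the cpu freelist (on PREEMPT_RT the local lock is
 * taken instead).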
3400 * 3401 * If fastpath is not possible then fall back to __slab_free where we deal 3402 * with all sorts of special processing. 3403 * 3404 * Bulk free of a freelist with several objects (all pointing to the 3405 * same page) possible by specifying head and tail ptr, plus objects 3406 * count (cnt). Bulk free indicated by tail pointer being set. 3407 */ 3408 static __always_inline void do_slab_free(struct kmem_cache *s, 3409 struct page *page, void *head, void *tail, 3410 int cnt, unsigned long addr) 3411 { 3412 void *tail_obj = tail ? : head; 3413 struct kmem_cache_cpu *c; 3414 unsigned long tid; 3415 3416 memcg_slab_free_hook(s, &head, 1); 3417 redo: 3418 /* 3419 * Determine the currently cpus per cpu slab. 3420 * The cpu may change afterward. However that does not matter since 3421 * data is retrieved via this pointer. If we are on the same cpu 3422 * during the cmpxchg then the free will succeed. 3423 */ 3424 c = raw_cpu_ptr(s->cpu_slab); 3425 tid = READ_ONCE(c->tid); 3426 3427 /* Same with comment on barrier() in slab_alloc_node() */ 3428 barrier(); 3429 3430 if (likely(page == c->page)) { 3431 #ifndef CONFIG_PREEMPT_RT 3432 void **freelist = READ_ONCE(c->freelist); 3433 3434 set_freepointer(s, tail_obj, freelist); 3435 3436 if (unlikely(!this_cpu_cmpxchg_double( 3437 s->cpu_slab->freelist, s->cpu_slab->tid, 3438 freelist, tid, 3439 head, next_tid(tid)))) { 3440 3441 note_cmpxchg_failure("slab_free", s, tid); 3442 goto redo; 3443 } 3444 #else /* CONFIG_PREEMPT_RT */ 3445 /* 3446 * We cannot use the lockless fastpath on PREEMPT_RT because if 3447 * a slowpath has taken the local_lock_irqsave(), it is not 3448 * protected against a fast path operation in an irq handler. So 3449 * we need to take the local_lock. We shouldn't simply defer to 3450 * __slab_free() as that wouldn't use the cpu freelist at all. 3451 */ 3452 void **freelist; 3453 3454 local_lock(&s->cpu_slab->lock); 3455 c = this_cpu_ptr(s->cpu_slab); 3456 if (unlikely(page != c->page)) { 3457 local_unlock(&s->cpu_slab->lock); 3458 goto redo; 3459 } 3460 tid = c->tid; 3461 freelist = c->freelist; 3462 3463 set_freepointer(s, tail_obj, freelist); 3464 c->freelist = head; 3465 c->tid = next_tid(tid); 3466 3467 local_unlock(&s->cpu_slab->lock); 3468 #endif 3469 stat(s, FREE_FASTPATH); 3470 } else 3471 __slab_free(s, page, head, tail_obj, cnt, addr); 3472 3473 } 3474 3475 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, 3476 void *head, void *tail, int cnt, 3477 unsigned long addr) 3478 { 3479 /* 3480 * With KASAN enabled slab_free_freelist_hook modifies the freelist 3481 * to remove objects, whose reuse must be delayed. 
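 * For example, KASAN may divert every object of this call into its
 * quarantine; the hook then reports that nothing is left to free and
 * do_slab_free() is skipped entirely.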
3482 */ 3483 if (slab_free_freelist_hook(s, &head, &tail)) 3484 do_slab_free(s, page, head, tail, cnt, addr); 3485 } 3486 3487 #ifdef CONFIG_KASAN_GENERIC 3488 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 3489 { 3490 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr); 3491 } 3492 #endif 3493 3494 void kmem_cache_free(struct kmem_cache *s, void *x) 3495 { 3496 s = cache_from_obj(s, x); 3497 if (!s) 3498 return; 3499 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); 3500 trace_kmem_cache_free(_RET_IP_, x, s->name); 3501 } 3502 EXPORT_SYMBOL(kmem_cache_free); 3503 3504 struct detached_freelist { 3505 struct page *page; 3506 void *tail; 3507 void *freelist; 3508 int cnt; 3509 struct kmem_cache *s; 3510 }; 3511 3512 static inline void free_nonslab_page(struct page *page, void *object) 3513 { 3514 unsigned int order = compound_order(page); 3515 3516 VM_BUG_ON_PAGE(!PageCompound(page), page); 3517 kfree_hook(object); 3518 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); 3519 __free_pages(page, order); 3520 } 3521 3522 /* 3523 * This function progressively scans the array of free objects (with 3524 * a limited look ahead) and extracts objects belonging to the same 3525 * page. It builds a detached freelist directly within the given 3526 * page/objects. This can happen without any need for 3527 * synchronization, because the objects are owned by the running process. 3528 * The freelist is built up as a single linked list in the objects. 3529 * The idea is that this detached freelist can then be bulk 3530 * transferred to the real freelist(s), while only requiring a single 3531 * synchronization primitive. Look ahead in the array is limited due 3532 * to performance reasons. 3533 */ 3534 static inline 3535 int build_detached_freelist(struct kmem_cache *s, size_t size, 3536 void **p, struct detached_freelist *df) 3537 { 3538 size_t first_skipped_index = 0; 3539 int lookahead = 3; 3540 void *object; 3541 struct page *page; 3542 3543 /* Always re-init detached_freelist */ 3544 df->page = NULL; 3545 3546 do { 3547 object = p[--size]; 3548 /* Do we need !ZERO_OR_NULL_PTR(object) here?
(for kfree) */ 3549 } while (!object && size); 3550 3551 if (!object) 3552 return 0; 3553 3554 page = virt_to_head_page(object); 3555 if (!s) { 3556 /* Handle kalloc'ed objects */ 3557 if (unlikely(!PageSlab(page))) { 3558 free_nonslab_page(page, object); 3559 p[size] = NULL; /* mark object processed */ 3560 return size; 3561 } 3562 /* Derive kmem_cache from object */ 3563 df->s = page->slab_cache; 3564 } else { 3565 df->s = cache_from_obj(s, object); /* Support for memcg */ 3566 } 3567 3568 if (is_kfence_address(object)) { 3569 slab_free_hook(df->s, object, false); 3570 __kfence_free(object); 3571 p[size] = NULL; /* mark object processed */ 3572 return size; 3573 } 3574 3575 /* Start new detached freelist */ 3576 df->page = page; 3577 set_freepointer(df->s, object, NULL); 3578 df->tail = object; 3579 df->freelist = object; 3580 p[size] = NULL; /* mark object processed */ 3581 df->cnt = 1; 3582 3583 while (size) { 3584 object = p[--size]; 3585 if (!object) 3586 continue; /* Skip processed objects */ 3587 3588 /* df->page is always set at this point */ 3589 if (df->page == virt_to_head_page(object)) { 3590 /* Opportunity build freelist */ 3591 set_freepointer(df->s, object, df->freelist); 3592 df->freelist = object; 3593 df->cnt++; 3594 p[size] = NULL; /* mark object processed */ 3595 3596 continue; 3597 } 3598 3599 /* Limit look ahead search */ 3600 if (!--lookahead) 3601 break; 3602 3603 if (!first_skipped_index) 3604 first_skipped_index = size + 1; 3605 } 3606 3607 return first_skipped_index; 3608 } 3609 3610 /* Note that interrupts must be enabled when calling this function. */ 3611 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3612 { 3613 if (WARN_ON(!size)) 3614 return; 3615 3616 memcg_slab_free_hook(s, p, size); 3617 do { 3618 struct detached_freelist df; 3619 3620 size = build_detached_freelist(s, size, p, &df); 3621 if (!df.page) 3622 continue; 3623 3624 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); 3625 } while (likely(size)); 3626 } 3627 EXPORT_SYMBOL(kmem_cache_free_bulk); 3628 3629 /* Note that interrupts must be enabled when calling this function. */ 3630 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3631 void **p) 3632 { 3633 struct kmem_cache_cpu *c; 3634 int i; 3635 struct obj_cgroup *objcg = NULL; 3636 3637 /* memcg and kmem_cache debug support */ 3638 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3639 if (unlikely(!s)) 3640 return false; 3641 /* 3642 * Drain objects in the per cpu slab, while disabling local 3643 * IRQs, which protects against PREEMPT and interrupts 3644 * handlers invoking normal fastpath. 3645 */ 3646 c = slub_get_cpu_ptr(s->cpu_slab); 3647 local_lock_irq(&s->cpu_slab->lock); 3648 3649 for (i = 0; i < size; i++) { 3650 void *object = kfence_alloc(s, s->object_size, flags); 3651 3652 if (unlikely(object)) { 3653 p[i] = object; 3654 continue; 3655 } 3656 3657 object = c->freelist; 3658 if (unlikely(!object)) { 3659 /* 3660 * We may have removed an object from c->freelist using 3661 * the fastpath in the previous iteration; in that case, 3662 * c->tid has not been bumped yet. 3663 * Since ___slab_alloc() may reenable interrupts while 3664 * allocating memory, we should bump c->tid now. 
3665 */ 3666 c->tid = next_tid(c->tid); 3667 3668 local_unlock_irq(&s->cpu_slab->lock); 3669 3670 /* 3671 * Invoking slow path likely have side-effect 3672 * of re-populating per CPU c->freelist 3673 */ 3674 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 3675 _RET_IP_, c); 3676 if (unlikely(!p[i])) 3677 goto error; 3678 3679 c = this_cpu_ptr(s->cpu_slab); 3680 maybe_wipe_obj_freeptr(s, p[i]); 3681 3682 local_lock_irq(&s->cpu_slab->lock); 3683 3684 continue; /* goto for-loop */ 3685 } 3686 c->freelist = get_freepointer(s, object); 3687 p[i] = object; 3688 maybe_wipe_obj_freeptr(s, p[i]); 3689 } 3690 c->tid = next_tid(c->tid); 3691 local_unlock_irq(&s->cpu_slab->lock); 3692 slub_put_cpu_ptr(s->cpu_slab); 3693 3694 /* 3695 * memcg and kmem_cache debug support and memory initialization. 3696 * Done outside of the IRQ disabled fastpath loop. 3697 */ 3698 slab_post_alloc_hook(s, objcg, flags, size, p, 3699 slab_want_init_on_alloc(flags, s)); 3700 return i; 3701 error: 3702 slub_put_cpu_ptr(s->cpu_slab); 3703 slab_post_alloc_hook(s, objcg, flags, i, p, false); 3704 __kmem_cache_free_bulk(s, i, p); 3705 return 0; 3706 } 3707 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3708 3709 3710 /* 3711 * Object placement in a slab is made very easy because we always start at 3712 * offset 0. If we tune the size of the object to the alignment then we can 3713 * get the required alignment by putting one properly sized object after 3714 * another. 3715 * 3716 * Notice that the allocation order determines the sizes of the per cpu 3717 * caches. Each processor has always one slab available for allocations. 3718 * Increasing the allocation order reduces the number of times that slabs 3719 * must be moved on and off the partial lists and is therefore a factor in 3720 * locking overhead. 3721 */ 3722 3723 /* 3724 * Minimum / Maximum order of slab pages. This influences locking overhead 3725 * and slab fragmentation. A higher order reduces the number of partial slabs 3726 * and increases the number of allocations possible without having to 3727 * take the list_lock. 3728 */ 3729 static unsigned int slub_min_order; 3730 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; 3731 static unsigned int slub_min_objects; 3732 3733 /* 3734 * Calculate the order of allocation given an slab object size. 3735 * 3736 * The order of allocation has significant impact on performance and other 3737 * system components. Generally order 0 allocations should be preferred since 3738 * order 0 does not cause fragmentation in the page allocator. Larger objects 3739 * be problematic to put into order 0 slabs because there may be too much 3740 * unused space left. We go to a higher order if more than 1/16th of the slab 3741 * would be wasted. 3742 * 3743 * In order to reach satisfactory performance we must ensure that a minimum 3744 * number of objects is in one slab. Otherwise we may generate too much 3745 * activity on the partial lists which requires taking the list_lock. This is 3746 * less a concern for large slabs though which are rarely used. 3747 * 3748 * slub_max_order specifies the order where we begin to stop considering the 3749 * number of objects in a slab as critical. If we reach slub_max_order then 3750 * we try to keep the page order as low as possible. So we accept more waste 3751 * of space in favor of a small page order. 3752 * 3753 * Higher order allocations also allow the placement of more objects in a 3754 * slab and thereby reduce object handling overhead. 
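 * A hypothetical example of the waste rule used below, assuming 4 KiB
 * pages and a scan starting at order 0: for a 700 byte object with
 * fract_leftover of 16, an order-0 slab leaves 4096 % 700 = 596 bytes
 * unused, more than 4096 / 16 = 256, so the loop moves on; an order-1
 * slab leaves 8192 % 700 = 492 bytes, within 8192 / 16 = 512, so order 1
 * is picked (provided it does not exceed slub_max_order).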
If the user has 3755 * requested a higher minimum order then we start with that one instead of 3756 * the smallest order which will fit the object. 3757 */ 3758 static inline unsigned int slab_order(unsigned int size, 3759 unsigned int min_objects, unsigned int max_order, 3760 unsigned int fract_leftover) 3761 { 3762 unsigned int min_order = slub_min_order; 3763 unsigned int order; 3764 3765 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 3766 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 3767 3768 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); 3769 order <= max_order; order++) { 3770 3771 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 3772 unsigned int rem; 3773 3774 rem = slab_size % size; 3775 3776 if (rem <= slab_size / fract_leftover) 3777 break; 3778 } 3779 3780 return order; 3781 } 3782 3783 static inline int calculate_order(unsigned int size) 3784 { 3785 unsigned int order; 3786 unsigned int min_objects; 3787 unsigned int max_objects; 3788 unsigned int nr_cpus; 3789 3790 /* 3791 * Attempt to find best configuration for a slab. This 3792 * works by first attempting to generate a layout with 3793 * the best configuration and backing off gradually. 3794 * 3795 * First we increase the acceptable waste in a slab. Then 3796 * we reduce the minimum objects required in a slab. 3797 */ 3798 min_objects = slub_min_objects; 3799 if (!min_objects) { 3800 /* 3801 * Some architectures will only update present cpus when 3802 * onlining them, so don't trust the number if it's just 1. But 3803 * we also don't want to use nr_cpu_ids always, as on some other 3804 * architectures, there can be many possible cpus, but never 3805 * onlined. Here we compromise between trying to avoid too high 3806 * order on systems that appear larger than they are, and too 3807 * low order on systems that appear smaller than they are. 3808 */ 3809 nr_cpus = num_present_cpus(); 3810 if (nr_cpus <= 1) 3811 nr_cpus = nr_cpu_ids; 3812 min_objects = 4 * (fls(nr_cpus) + 1); 3813 } 3814 max_objects = order_objects(slub_max_order, size); 3815 min_objects = min(min_objects, max_objects); 3816 3817 while (min_objects > 1) { 3818 unsigned int fraction; 3819 3820 fraction = 16; 3821 while (fraction >= 4) { 3822 order = slab_order(size, min_objects, 3823 slub_max_order, fraction); 3824 if (order <= slub_max_order) 3825 return order; 3826 fraction /= 2; 3827 } 3828 min_objects--; 3829 } 3830 3831 /* 3832 * We were unable to place multiple objects in a slab. Now 3833 * lets see if we can place a single object there. 3834 */ 3835 order = slab_order(size, 1, slub_max_order, 1); 3836 if (order <= slub_max_order) 3837 return order; 3838 3839 /* 3840 * Doh this slab cannot be placed using slub_max_order. 3841 */ 3842 order = slab_order(size, 1, MAX_ORDER, 1); 3843 if (order < MAX_ORDER) 3844 return order; 3845 return -ENOSYS; 3846 } 3847 3848 static void 3849 init_kmem_cache_node(struct kmem_cache_node *n) 3850 { 3851 n->nr_partial = 0; 3852 spin_lock_init(&n->list_lock); 3853 INIT_LIST_HEAD(&n->partial); 3854 #ifdef CONFIG_SLUB_DEBUG 3855 atomic_long_set(&n->nr_slabs, 0); 3856 atomic_long_set(&n->total_objects, 0); 3857 INIT_LIST_HEAD(&n->full); 3858 #endif 3859 } 3860 3861 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 3862 { 3863 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 3864 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); 3865 3866 /* 3867 * Must align to double word boundary for the double cmpxchg 3868 * instructions to work; see __pcpu_double_call_return_bool(). 
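 * The pair updated atomically is the adjacent freelist pointer and tid in
 * struct kmem_cache_cpu, as used by this_cpu_cmpxchg_double() in the
 * fastpaths; hence the 2 * sizeof(void *) alignment passed below.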
3869 */ 3870 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 3871 2 * sizeof(void *)); 3872 3873 if (!s->cpu_slab) 3874 return 0; 3875 3876 init_kmem_cache_cpus(s); 3877 3878 return 1; 3879 } 3880 3881 static struct kmem_cache *kmem_cache_node; 3882 3883 /* 3884 * No kmalloc_node yet so do it by hand. We know that this is the first 3885 * slab on the node for this slabcache. There are no concurrent accesses 3886 * possible. 3887 * 3888 * Note that this function only works on the kmem_cache_node 3889 * when allocating for the kmem_cache_node. This is used for bootstrapping 3890 * memory on a fresh node that has no slab structures yet. 3891 */ 3892 static void early_kmem_cache_node_alloc(int node) 3893 { 3894 struct page *page; 3895 struct kmem_cache_node *n; 3896 3897 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 3898 3899 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); 3900 3901 BUG_ON(!page); 3902 if (page_to_nid(page) != node) { 3903 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 3904 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 3905 } 3906 3907 n = page->freelist; 3908 BUG_ON(!n); 3909 #ifdef CONFIG_SLUB_DEBUG 3910 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 3911 init_tracking(kmem_cache_node, n); 3912 #endif 3913 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 3914 page->freelist = get_freepointer(kmem_cache_node, n); 3915 page->inuse = 1; 3916 page->frozen = 0; 3917 kmem_cache_node->node[node] = n; 3918 init_kmem_cache_node(n); 3919 inc_slabs_node(kmem_cache_node, node, page->objects); 3920 3921 /* 3922 * No locks need to be taken here as it has just been 3923 * initialized and there is no concurrent access. 3924 */ 3925 __add_partial(n, page, DEACTIVATE_TO_HEAD); 3926 } 3927 3928 static void free_kmem_cache_nodes(struct kmem_cache *s) 3929 { 3930 int node; 3931 struct kmem_cache_node *n; 3932 3933 for_each_kmem_cache_node(s, node, n) { 3934 s->node[node] = NULL; 3935 kmem_cache_free(kmem_cache_node, n); 3936 } 3937 } 3938 3939 void __kmem_cache_release(struct kmem_cache *s) 3940 { 3941 cache_random_seq_destroy(s); 3942 free_percpu(s->cpu_slab); 3943 free_kmem_cache_nodes(s); 3944 } 3945 3946 static int init_kmem_cache_nodes(struct kmem_cache *s) 3947 { 3948 int node; 3949 3950 for_each_node_mask(node, slab_nodes) { 3951 struct kmem_cache_node *n; 3952 3953 if (slab_state == DOWN) { 3954 early_kmem_cache_node_alloc(node); 3955 continue; 3956 } 3957 n = kmem_cache_alloc_node(kmem_cache_node, 3958 GFP_KERNEL, node); 3959 3960 if (!n) { 3961 free_kmem_cache_nodes(s); 3962 return 0; 3963 } 3964 3965 init_kmem_cache_node(n); 3966 s->node[node] = n; 3967 } 3968 return 1; 3969 } 3970 3971 static void set_min_partial(struct kmem_cache *s, unsigned long min) 3972 { 3973 if (min < MIN_PARTIAL) 3974 min = MIN_PARTIAL; 3975 else if (min > MAX_PARTIAL) 3976 min = MAX_PARTIAL; 3977 s->min_partial = min; 3978 } 3979 3980 static void set_cpu_partial(struct kmem_cache *s) 3981 { 3982 #ifdef CONFIG_SLUB_CPU_PARTIAL 3983 /* 3984 * cpu_partial determined the maximum number of objects kept in the 3985 * per cpu partial lists of a processor. 3986 * 3987 * Per cpu partial lists mainly contain slabs that just have one 3988 * object freed. If they are used for allocation then they can be 3989 * filled up again with minimal effort. The slab will never hit the 3990 * per node partial lists and therefore no locking will be required. 
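 * For example, with the thresholds below a hypothetical cache whose slot
 * size s->size is 512 bytes may accumulate up to 13 free objects in per
 * cpu partial slabs, while a cache whose slots are at least a page in
 * size keeps only 2.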
3991 * 3992 * This setting also determines 3993 * 3994 * A) The number of objects from per cpu partial slabs dumped to the 3995 * per node list when we reach the limit. 3996 * B) The number of objects in cpu partial slabs to extract from the 3997 * per node list when we run out of per cpu objects. We only fetch 3998 * 50% to keep some capacity around for frees. 3999 */ 4000 if (!kmem_cache_has_cpu_partial(s)) 4001 slub_set_cpu_partial(s, 0); 4002 else if (s->size >= PAGE_SIZE) 4003 slub_set_cpu_partial(s, 2); 4004 else if (s->size >= 1024) 4005 slub_set_cpu_partial(s, 6); 4006 else if (s->size >= 256) 4007 slub_set_cpu_partial(s, 13); 4008 else 4009 slub_set_cpu_partial(s, 30); 4010 #endif 4011 } 4012 4013 /* 4014 * calculate_sizes() determines the order and the distribution of data within 4015 * a slab object. 4016 */ 4017 static int calculate_sizes(struct kmem_cache *s, int forced_order) 4018 { 4019 slab_flags_t flags = s->flags; 4020 unsigned int size = s->object_size; 4021 unsigned int order; 4022 4023 /* 4024 * Round up object size to the next word boundary. We can only 4025 * place the free pointer at word boundaries and this determines 4026 * the possible location of the free pointer. 4027 */ 4028 size = ALIGN(size, sizeof(void *)); 4029 4030 #ifdef CONFIG_SLUB_DEBUG 4031 /* 4032 * Determine if we can poison the object itself. If the user of 4033 * the slab may touch the object after free or before allocation 4034 * then we should never poison the object itself. 4035 */ 4036 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 4037 !s->ctor) 4038 s->flags |= __OBJECT_POISON; 4039 else 4040 s->flags &= ~__OBJECT_POISON; 4041 4042 4043 /* 4044 * If we are Redzoning then check if there is some space between the 4045 * end of the object and the free pointer. If not then add an 4046 * additional word to have some bytes to store Redzone information. 4047 */ 4048 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 4049 size += sizeof(void *); 4050 #endif 4051 4052 /* 4053 * With that we have determined the number of bytes in actual use 4054 * by the object and redzoning. 4055 */ 4056 s->inuse = size; 4057 4058 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || 4059 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || 4060 s->ctor) { 4061 /* 4062 * Relocate free pointer after the object if it is not 4063 * permitted to overwrite the first word of the object on 4064 * kmem_cache_free. 4065 * 4066 * This is the case if we do RCU, have a constructor or 4067 * destructor, are poisoning the objects, or are 4068 * redzoning an object smaller than sizeof(void *). 4069 * 4070 * The assumption that s->offset >= s->inuse means free 4071 * pointer is outside of the object is used in the 4072 * freeptr_outside_object() function. If that is no 4073 * longer true, the function needs to be modified. 4074 */ 4075 s->offset = size; 4076 size += sizeof(void *); 4077 } else { 4078 /* 4079 * Store freelist pointer near middle of object to keep 4080 * it away from the edges of the object to avoid small 4081 * sized over/underflows from neighboring allocations. 4082 */ 4083 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 4084 } 4085 4086 #ifdef CONFIG_SLUB_DEBUG 4087 if (flags & SLAB_STORE_USER) 4088 /* 4089 * Need to store information about allocs and frees after 4090 * the object. 
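 * (One struct track records the most recent allocation, the other the
 * most recent free, hence the 2 * sizeof(struct track) added below.)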
4091 */ 4092 size += 2 * sizeof(struct track); 4093 #endif 4094 4095 kasan_cache_create(s, &size, &s->flags); 4096 #ifdef CONFIG_SLUB_DEBUG 4097 if (flags & SLAB_RED_ZONE) { 4098 /* 4099 * Add some empty padding so that we can catch 4100 * overwrites from earlier objects rather than let 4101 * tracking information or the free pointer be 4102 * corrupted if a user writes before the start 4103 * of the object. 4104 */ 4105 size += sizeof(void *); 4106 4107 s->red_left_pad = sizeof(void *); 4108 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 4109 size += s->red_left_pad; 4110 } 4111 #endif 4112 4113 /* 4114 * SLUB stores one object immediately after another beginning from 4115 * offset 0. In order to align the objects we have to simply size 4116 * each object to conform to the alignment. 4117 */ 4118 size = ALIGN(size, s->align); 4119 s->size = size; 4120 s->reciprocal_size = reciprocal_value(size); 4121 if (forced_order >= 0) 4122 order = forced_order; 4123 else 4124 order = calculate_order(size); 4125 4126 if ((int)order < 0) 4127 return 0; 4128 4129 s->allocflags = 0; 4130 if (order) 4131 s->allocflags |= __GFP_COMP; 4132 4133 if (s->flags & SLAB_CACHE_DMA) 4134 s->allocflags |= GFP_DMA; 4135 4136 if (s->flags & SLAB_CACHE_DMA32) 4137 s->allocflags |= GFP_DMA32; 4138 4139 if (s->flags & SLAB_RECLAIM_ACCOUNT) 4140 s->allocflags |= __GFP_RECLAIMABLE; 4141 4142 /* 4143 * Determine the number of objects per slab 4144 */ 4145 s->oo = oo_make(order, size); 4146 s->min = oo_make(get_order(size), size); 4147 if (oo_objects(s->oo) > oo_objects(s->max)) 4148 s->max = s->oo; 4149 4150 return !!oo_objects(s->oo); 4151 } 4152 4153 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 4154 { 4155 s->flags = kmem_cache_flags(s->size, flags, s->name); 4156 #ifdef CONFIG_SLAB_FREELIST_HARDENED 4157 s->random = get_random_long(); 4158 #endif 4159 4160 if (!calculate_sizes(s, -1)) 4161 goto error; 4162 if (disable_higher_order_debug) { 4163 /* 4164 * Disable debugging flags that store metadata if the min slab 4165 * order increased. 4166 */ 4167 if (get_order(s->size) > get_order(s->object_size)) { 4168 s->flags &= ~DEBUG_METADATA_FLAGS; 4169 s->offset = 0; 4170 if (!calculate_sizes(s, -1)) 4171 goto error; 4172 } 4173 } 4174 4175 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 4176 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 4177 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) 4178 /* Enable fast mode */ 4179 s->flags |= __CMPXCHG_DOUBLE; 4180 #endif 4181 4182 /* 4183 * The larger the object size is, the more pages we want on the partial 4184 * list to avoid pounding the page allocator excessively. 
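 * For example, a cache with 4096-byte objects gets ilog2(4096) / 2 = 6
 * as its target, and set_min_partial() then clamps the result to the
 * [MIN_PARTIAL, MAX_PARTIAL] range.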
4185 */ 4186 set_min_partial(s, ilog2(s->size) / 2); 4187 4188 set_cpu_partial(s); 4189 4190 #ifdef CONFIG_NUMA 4191 s->remote_node_defrag_ratio = 1000; 4192 #endif 4193 4194 /* Initialize the pre-computed randomized freelist if slab is up */ 4195 if (slab_state >= UP) { 4196 if (init_cache_random_seq(s)) 4197 goto error; 4198 } 4199 4200 if (!init_kmem_cache_nodes(s)) 4201 goto error; 4202 4203 if (alloc_kmem_cache_cpus(s)) 4204 return 0; 4205 4206 free_kmem_cache_nodes(s); 4207 error: 4208 return -EINVAL; 4209 } 4210
4211 static void list_slab_objects(struct kmem_cache *s, struct page *page, 4212 const char *text) 4213 { 4214 #ifdef CONFIG_SLUB_DEBUG 4215 void *addr = page_address(page); 4216 unsigned long flags; 4217 unsigned long *map; 4218 void *p; 4219 4220 slab_err(s, page, text, s->name); 4221 slab_lock(page, &flags); 4222 4223 map = get_map(s, page); 4224 for_each_object(p, s, addr, page->objects) { 4225 4226 if (!test_bit(__obj_to_index(s, addr, p), map)) { 4227 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 4228 print_tracking(s, p); 4229 } 4230 } 4231 put_map(map); 4232 slab_unlock(page, &flags); 4233 #endif 4234 } 4235
4236 /* 4237 * Attempt to free all partial slabs on a node. 4238 * This is called from __kmem_cache_shutdown(). We must take list_lock 4239 * because a sysfs file might still access the partial list after shutdown. 4240 */ 4241 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 4242 { 4243 LIST_HEAD(discard); 4244 struct page *page, *h; 4245 4246 BUG_ON(irqs_disabled()); 4247 spin_lock_irq(&n->list_lock); 4248 list_for_each_entry_safe(page, h, &n->partial, slab_list) { 4249 if (!page->inuse) { 4250 remove_partial(n, page); 4251 list_add(&page->slab_list, &discard); 4252 } else { 4253 list_slab_objects(s, page, 4254 "Objects remaining in %s on __kmem_cache_shutdown()"); 4255 } 4256 } 4257 spin_unlock_irq(&n->list_lock); 4258 4259 list_for_each_entry_safe(page, h, &discard, slab_list) 4260 discard_slab(s, page); 4261 } 4262
4263 bool __kmem_cache_empty(struct kmem_cache *s) 4264 { 4265 int node; 4266 struct kmem_cache_node *n; 4267 4268 for_each_kmem_cache_node(s, node, n) 4269 if (n->nr_partial || slabs_node(s, node)) 4270 return false; 4271 return true; 4272 } 4273 4274 /* 4275 * Release all resources used by a slab cache.
4276 */ 4277 int __kmem_cache_shutdown(struct kmem_cache *s) 4278 { 4279 int node; 4280 struct kmem_cache_node *n; 4281 4282 flush_all_cpus_locked(s); 4283 /* Attempt to free all objects */ 4284 for_each_kmem_cache_node(s, node, n) { 4285 free_partial(s, n); 4286 if (n->nr_partial || slabs_node(s, node)) 4287 return 1; 4288 } 4289 return 0; 4290 } 4291 4292 #ifdef CONFIG_PRINTK 4293 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) 4294 { 4295 void *base; 4296 int __maybe_unused i; 4297 unsigned int objnr; 4298 void *objp; 4299 void *objp0; 4300 struct kmem_cache *s = page->slab_cache; 4301 struct track __maybe_unused *trackp; 4302 4303 kpp->kp_ptr = object; 4304 kpp->kp_page = page; 4305 kpp->kp_slab_cache = s; 4306 base = page_address(page); 4307 objp0 = kasan_reset_tag(object); 4308 #ifdef CONFIG_SLUB_DEBUG 4309 objp = restore_red_left(s, objp0); 4310 #else 4311 objp = objp0; 4312 #endif 4313 objnr = obj_to_index(s, page, objp); 4314 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 4315 objp = base + s->size * objnr; 4316 kpp->kp_objp = objp; 4317 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || 4318 !(s->flags & SLAB_STORE_USER)) 4319 return; 4320 #ifdef CONFIG_SLUB_DEBUG 4321 objp = fixup_red_left(s, objp); 4322 trackp = get_track(s, objp, TRACK_ALLOC); 4323 kpp->kp_ret = (void *)trackp->addr; 4324 #ifdef CONFIG_STACKTRACE 4325 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { 4326 kpp->kp_stack[i] = (void *)trackp->addrs[i]; 4327 if (!kpp->kp_stack[i]) 4328 break; 4329 } 4330 4331 trackp = get_track(s, objp, TRACK_FREE); 4332 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { 4333 kpp->kp_free_stack[i] = (void *)trackp->addrs[i]; 4334 if (!kpp->kp_free_stack[i]) 4335 break; 4336 } 4337 #endif 4338 #endif 4339 } 4340 #endif 4341 4342 /******************************************************************** 4343 * Kmalloc subsystem 4344 *******************************************************************/ 4345 4346 static int __init setup_slub_min_order(char *str) 4347 { 4348 get_option(&str, (int *)&slub_min_order); 4349 4350 return 1; 4351 } 4352 4353 __setup("slub_min_order=", setup_slub_min_order); 4354 4355 static int __init setup_slub_max_order(char *str) 4356 { 4357 get_option(&str, (int *)&slub_max_order); 4358 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); 4359 4360 return 1; 4361 } 4362 4363 __setup("slub_max_order=", setup_slub_max_order); 4364 4365 static int __init setup_slub_min_objects(char *str) 4366 { 4367 get_option(&str, (int *)&slub_min_objects); 4368 4369 return 1; 4370 } 4371 4372 __setup("slub_min_objects=", setup_slub_min_objects); 4373 4374 void *__kmalloc(size_t size, gfp_t flags) 4375 { 4376 struct kmem_cache *s; 4377 void *ret; 4378 4379 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4380 return kmalloc_large(size, flags); 4381 4382 s = kmalloc_slab(size, flags); 4383 4384 if (unlikely(ZERO_OR_NULL_PTR(s))) 4385 return s; 4386 4387 ret = slab_alloc(s, flags, _RET_IP_, size); 4388 4389 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); 4390 4391 ret = kasan_kmalloc(s, ret, size, flags); 4392 4393 return ret; 4394 } 4395 EXPORT_SYMBOL(__kmalloc); 4396 4397 #ifdef CONFIG_NUMA 4398 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 4399 { 4400 struct page *page; 4401 void *ptr = NULL; 4402 unsigned int order = get_order(size); 4403 4404 flags |= __GFP_COMP; 4405 page = alloc_pages_node(node, flags, 
order); 4406 if (page) { 4407 ptr = page_address(page); 4408 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, 4409 PAGE_SIZE << order); 4410 } 4411 4412 return kmalloc_large_node_hook(ptr, size, flags); 4413 } 4414 4415 void *__kmalloc_node(size_t size, gfp_t flags, int node) 4416 { 4417 struct kmem_cache *s; 4418 void *ret; 4419 4420 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4421 ret = kmalloc_large_node(size, flags, node); 4422 4423 trace_kmalloc_node(_RET_IP_, ret, 4424 size, PAGE_SIZE << get_order(size), 4425 flags, node); 4426 4427 return ret; 4428 } 4429 4430 s = kmalloc_slab(size, flags); 4431 4432 if (unlikely(ZERO_OR_NULL_PTR(s))) 4433 return s; 4434 4435 ret = slab_alloc_node(s, flags, node, _RET_IP_, size); 4436 4437 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); 4438 4439 ret = kasan_kmalloc(s, ret, size, flags); 4440 4441 return ret; 4442 } 4443 EXPORT_SYMBOL(__kmalloc_node); 4444 #endif /* CONFIG_NUMA */ 4445 4446 #ifdef CONFIG_HARDENED_USERCOPY 4447 /* 4448 * Rejects incorrectly sized objects and objects that are to be copied 4449 * to/from userspace but do not fall entirely within the containing slab 4450 * cache's usercopy region. 4451 * 4452 * Returns NULL if check passes, otherwise const char * to name of cache 4453 * to indicate an error. 4454 */ 4455 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, 4456 bool to_user) 4457 { 4458 struct kmem_cache *s; 4459 unsigned int offset; 4460 size_t object_size; 4461 bool is_kfence = is_kfence_address(ptr); 4462 4463 ptr = kasan_reset_tag(ptr); 4464 4465 /* Find object and usable object size. */ 4466 s = page->slab_cache; 4467 4468 /* Reject impossible pointers. */ 4469 if (ptr < page_address(page)) 4470 usercopy_abort("SLUB object not in SLUB page?!", NULL, 4471 to_user, 0, n); 4472 4473 /* Find offset within object. */ 4474 if (is_kfence) 4475 offset = ptr - kfence_object_start(ptr); 4476 else 4477 offset = (ptr - page_address(page)) % s->size; 4478 4479 /* Adjust for redzone and reject if within the redzone. */ 4480 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 4481 if (offset < s->red_left_pad) 4482 usercopy_abort("SLUB object in left red zone", 4483 s->name, to_user, offset, n); 4484 offset -= s->red_left_pad; 4485 } 4486 4487 /* Allow address range falling entirely within usercopy region. */ 4488 if (offset >= s->useroffset && 4489 offset - s->useroffset <= s->usersize && 4490 n <= s->useroffset - offset + s->usersize) 4491 return; 4492 4493 /* 4494 * If the copy is still within the allocated object, produce 4495 * a warning instead of rejecting the copy. This is intended 4496 * to be a temporary method to find any missing usercopy 4497 * whitelists. 
4498 */ 4499 object_size = slab_ksize(s); 4500 if (usercopy_fallback && 4501 offset <= object_size && n <= object_size - offset) { 4502 usercopy_warn("SLUB object", s->name, to_user, offset, n); 4503 return; 4504 } 4505 4506 usercopy_abort("SLUB object", s->name, to_user, offset, n); 4507 } 4508 #endif /* CONFIG_HARDENED_USERCOPY */ 4509 4510 size_t __ksize(const void *object) 4511 { 4512 struct page *page; 4513 4514 if (unlikely(object == ZERO_SIZE_PTR)) 4515 return 0; 4516 4517 page = virt_to_head_page(object); 4518 4519 if (unlikely(!PageSlab(page))) { 4520 WARN_ON(!PageCompound(page)); 4521 return page_size(page); 4522 } 4523 4524 return slab_ksize(page->slab_cache); 4525 } 4526 EXPORT_SYMBOL(__ksize); 4527 4528 void kfree(const void *x) 4529 { 4530 struct page *page; 4531 void *object = (void *)x; 4532 4533 trace_kfree(_RET_IP_, x); 4534 4535 if (unlikely(ZERO_OR_NULL_PTR(x))) 4536 return; 4537 4538 page = virt_to_head_page(x); 4539 if (unlikely(!PageSlab(page))) { 4540 free_nonslab_page(page, object); 4541 return; 4542 } 4543 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); 4544 } 4545 EXPORT_SYMBOL(kfree); 4546 4547 #define SHRINK_PROMOTE_MAX 32 4548 4549 /* 4550 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 4551 * up most to the head of the partial lists. New allocations will then 4552 * fill those up and thus they can be removed from the partial lists. 4553 * 4554 * The slabs with the least items are placed last. This results in them 4555 * being allocated from last increasing the chance that the last objects 4556 * are freed in them. 4557 */ 4558 static int __kmem_cache_do_shrink(struct kmem_cache *s) 4559 { 4560 int node; 4561 int i; 4562 struct kmem_cache_node *n; 4563 struct page *page; 4564 struct page *t; 4565 struct list_head discard; 4566 struct list_head promote[SHRINK_PROMOTE_MAX]; 4567 unsigned long flags; 4568 int ret = 0; 4569 4570 for_each_kmem_cache_node(s, node, n) { 4571 INIT_LIST_HEAD(&discard); 4572 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 4573 INIT_LIST_HEAD(promote + i); 4574 4575 spin_lock_irqsave(&n->list_lock, flags); 4576 4577 /* 4578 * Build lists of slabs to discard or promote. 4579 * 4580 * Note that concurrent frees may occur while we hold the 4581 * list_lock. page->inuse here is the upper limit. 4582 */ 4583 list_for_each_entry_safe(page, t, &n->partial, slab_list) { 4584 int free = page->objects - page->inuse; 4585 4586 /* Do not reread page->inuse */ 4587 barrier(); 4588 4589 /* We do not keep full slabs on the list */ 4590 BUG_ON(free <= 0); 4591 4592 if (free == page->objects) { 4593 list_move(&page->slab_list, &discard); 4594 n->nr_partial--; 4595 } else if (free <= SHRINK_PROMOTE_MAX) 4596 list_move(&page->slab_list, promote + free - 1); 4597 } 4598 4599 /* 4600 * Promote the slabs filled up most to the head of the 4601 * partial list. 
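 * (promote[0] holds the fullest slabs, i.e. those with a single free
 * object; splicing the indices from high to low therefore leaves the
 * fullest slabs closest to the head.)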
4602 */ 4603 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 4604 list_splice(promote + i, &n->partial); 4605 4606 spin_unlock_irqrestore(&n->list_lock, flags); 4607 4608 /* Release empty slabs */ 4609 list_for_each_entry_safe(page, t, &discard, slab_list) 4610 discard_slab(s, page); 4611 4612 if (slabs_node(s, node)) 4613 ret = 1; 4614 } 4615 4616 return ret; 4617 } 4618
4619 int __kmem_cache_shrink(struct kmem_cache *s) 4620 { 4621 flush_all(s); 4622 return __kmem_cache_do_shrink(s); 4623 } 4624
4625 static int slab_mem_going_offline_callback(void *arg) 4626 { 4627 struct kmem_cache *s; 4628 4629 mutex_lock(&slab_mutex); 4630 list_for_each_entry(s, &slab_caches, list) { 4631 flush_all_cpus_locked(s); 4632 __kmem_cache_do_shrink(s); 4633 } 4634 mutex_unlock(&slab_mutex); 4635 4636 return 0; 4637 } 4638
4639 static void slab_mem_offline_callback(void *arg) 4640 { 4641 struct memory_notify *marg = arg; 4642 int offline_node; 4643 4644 offline_node = marg->status_change_nid_normal; 4645 4646 /* 4647 * If the node still has available memory, its kmem_cache_node is still 4648 * needed, so there is nothing to do here. 4649 */ 4650 if (offline_node < 0) 4651 return; 4652 4653 mutex_lock(&slab_mutex); 4654 node_clear(offline_node, slab_nodes); 4655 /* 4656 * We no longer free kmem_cache_node structures here, as it would be 4657 * racy with all get_node() users, and infeasible to protect them with 4658 * slab_mutex. 4659 */ 4660 mutex_unlock(&slab_mutex); 4661 } 4662
4663 static int slab_mem_going_online_callback(void *arg) 4664 { 4665 struct kmem_cache_node *n; 4666 struct kmem_cache *s; 4667 struct memory_notify *marg = arg; 4668 int nid = marg->status_change_nid_normal; 4669 int ret = 0; 4670 4671 /* 4672 * If the node's memory is already available, then kmem_cache_node is 4673 * already created. Nothing to do. 4674 */ 4675 if (nid < 0) 4676 return 0; 4677 4678 /* 4679 * We are bringing a node online. No memory is available yet. We must 4680 * allocate a kmem_cache_node structure in order to bring the node 4681 * online. 4682 */ 4683 mutex_lock(&slab_mutex); 4684 list_for_each_entry(s, &slab_caches, list) { 4685 /* 4686 * The structure may already exist if the node was previously 4687 * onlined and offlined. 4688 */ 4689 if (get_node(s, nid)) 4690 continue; 4691 /* 4692 * XXX: kmem_cache_alloc_node will fall back to other nodes 4693 * since memory is not yet available from the node that 4694 * is brought up. 4695 */ 4696 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 4697 if (!n) { 4698 ret = -ENOMEM; 4699 goto out; 4700 } 4701 init_kmem_cache_node(n); 4702 s->node[nid] = n; 4703 } 4704 /* 4705 * Any cache created after this point will also have kmem_cache_node 4706 * initialized for the new node.
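 * (kmem_cache_open() initializes per node structures only for nodes set
 * in the slab_nodes mask, which is what node_set() below updates.)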
4707 */ 4708 node_set(nid, slab_nodes); 4709 out: 4710 mutex_unlock(&slab_mutex); 4711 return ret; 4712 } 4713
4714 static int slab_memory_callback(struct notifier_block *self, 4715 unsigned long action, void *arg) 4716 { 4717 int ret = 0; 4718 4719 switch (action) { 4720 case MEM_GOING_ONLINE: 4721 ret = slab_mem_going_online_callback(arg); 4722 break; 4723 case MEM_GOING_OFFLINE: 4724 ret = slab_mem_going_offline_callback(arg); 4725 break; 4726 case MEM_OFFLINE: 4727 case MEM_CANCEL_ONLINE: 4728 slab_mem_offline_callback(arg); 4729 break; 4730 case MEM_ONLINE: 4731 case MEM_CANCEL_OFFLINE: 4732 break; 4733 } 4734 if (ret) 4735 ret = notifier_from_errno(ret); 4736 else 4737 ret = NOTIFY_OK; 4738 return ret; 4739 } 4740
4741 static struct notifier_block slab_memory_callback_nb = { 4742 .notifier_call = slab_memory_callback, 4743 .priority = SLAB_CALLBACK_PRI, 4744 }; 4745
4746 /******************************************************************** 4747 * Basic setup of slabs 4748 *******************************************************************/ 4749 4750 /* 4751 * Used for early kmem_cache structures that were allocated using 4752 * the page allocator. Allocate them properly, then fix up the pointers 4753 * that may be pointing to the wrong kmem_cache structure. 4754 */ 4755
4756 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 4757 { 4758 int node; 4759 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 4760 struct kmem_cache_node *n; 4761 4762 memcpy(s, static_cache, kmem_cache->object_size); 4763 4764 /* 4765 * This runs very early, and only the boot processor is supposed to be 4766 * up. Even if it weren't true, IRQs are not up so we couldn't fire 4767 * IPIs around. 4768 */ 4769 __flush_cpu_slab(s, smp_processor_id()); 4770 for_each_kmem_cache_node(s, node, n) { 4771 struct page *p; 4772 4773 list_for_each_entry(p, &n->partial, slab_list) 4774 p->slab_cache = s; 4775 4776 #ifdef CONFIG_SLUB_DEBUG 4777 list_for_each_entry(p, &n->full, slab_list) 4778 p->slab_cache = s; 4779 #endif 4780 } 4781 list_add(&s->list, &slab_caches); 4782 return s; 4783 } 4784
4785 void __init kmem_cache_init(void) 4786 { 4787 static __initdata struct kmem_cache boot_kmem_cache, 4788 boot_kmem_cache_node; 4789 int node; 4790 4791 if (debug_guardpage_minorder()) 4792 slub_max_order = 0; 4793 4794 /* Print slub debugging pointers without hashing */ 4795 if (__slub_debug_enabled()) 4796 no_hash_pointers_enable(NULL); 4797 4798 kmem_cache_node = &boot_kmem_cache_node; 4799 kmem_cache = &boot_kmem_cache; 4800 4801 /* 4802 * Initialize the nodemask for which we will allocate per node 4803 * structures. Here we don't need to take slab_mutex yet.
4804 */ 4805 for_each_node_state(node, N_NORMAL_MEMORY) 4806 node_set(node, slab_nodes); 4807 4808 create_boot_cache(kmem_cache_node, "kmem_cache_node", 4809 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); 4810 4811 register_hotmemory_notifier(&slab_memory_callback_nb); 4812 4813 /* Able to allocate the per node structures */ 4814 slab_state = PARTIAL; 4815 4816 create_boot_cache(kmem_cache, "kmem_cache", 4817 offsetof(struct kmem_cache, node) + 4818 nr_node_ids * sizeof(struct kmem_cache_node *), 4819 SLAB_HWCACHE_ALIGN, 0, 0); 4820 4821 kmem_cache = bootstrap(&boot_kmem_cache); 4822 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 4823 4824 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 4825 setup_kmalloc_cache_index_table(); 4826 create_kmalloc_caches(0); 4827 4828 /* Setup random freelists for each cache */ 4829 init_freelist_randomization(); 4830 4831 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 4832 slub_cpu_dead); 4833 4834 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 4835 cache_line_size(), 4836 slub_min_order, slub_max_order, slub_min_objects, 4837 nr_cpu_ids, nr_node_ids); 4838 } 4839 4840 void __init kmem_cache_init_late(void) 4841 { 4842 } 4843 4844 struct kmem_cache * 4845 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 4846 slab_flags_t flags, void (*ctor)(void *)) 4847 { 4848 struct kmem_cache *s; 4849 4850 s = find_mergeable(size, align, flags, name, ctor); 4851 if (s) { 4852 s->refcount++; 4853 4854 /* 4855 * Adjust the object sizes so that we clear 4856 * the complete object on kzalloc. 4857 */ 4858 s->object_size = max(s->object_size, size); 4859 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 4860 4861 if (sysfs_slab_alias(s, name)) { 4862 s->refcount--; 4863 s = NULL; 4864 } 4865 } 4866 4867 return s; 4868 } 4869 4870 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4871 { 4872 int err; 4873 4874 err = kmem_cache_open(s, flags); 4875 if (err) 4876 return err; 4877 4878 /* Mutex is not taken during early boot */ 4879 if (slab_state <= UP) 4880 return 0; 4881 4882 err = sysfs_slab_add(s); 4883 if (err) 4884 __kmem_cache_release(s); 4885 4886 if (s->flags & SLAB_STORE_USER) 4887 debugfs_slab_add(s); 4888 4889 return err; 4890 } 4891 4892 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4893 { 4894 struct kmem_cache *s; 4895 void *ret; 4896 4897 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 4898 return kmalloc_large(size, gfpflags); 4899 4900 s = kmalloc_slab(size, gfpflags); 4901 4902 if (unlikely(ZERO_OR_NULL_PTR(s))) 4903 return s; 4904 4905 ret = slab_alloc(s, gfpflags, caller, size); 4906 4907 /* Honor the call site pointer we received. */ 4908 trace_kmalloc(caller, ret, size, s->size, gfpflags); 4909 4910 return ret; 4911 } 4912 EXPORT_SYMBOL(__kmalloc_track_caller); 4913 4914 #ifdef CONFIG_NUMA 4915 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, 4916 int node, unsigned long caller) 4917 { 4918 struct kmem_cache *s; 4919 void *ret; 4920 4921 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4922 ret = kmalloc_large_node(size, gfpflags, node); 4923 4924 trace_kmalloc_node(caller, ret, 4925 size, PAGE_SIZE << get_order(size), 4926 gfpflags, node); 4927 4928 return ret; 4929 } 4930 4931 s = kmalloc_slab(size, gfpflags); 4932 4933 if (unlikely(ZERO_OR_NULL_PTR(s))) 4934 return s; 4935 4936 ret = slab_alloc_node(s, gfpflags, node, caller, size); 4937 4938 /* Honor the call site pointer we received. 
*/ 4939 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); 4940 4941 return ret; 4942 } 4943 EXPORT_SYMBOL(__kmalloc_node_track_caller); 4944 #endif 4945 4946 #ifdef CONFIG_SYSFS 4947 static int count_inuse(struct page *page) 4948 { 4949 return page->inuse; 4950 } 4951 4952 static int count_total(struct page *page) 4953 { 4954 return page->objects; 4955 } 4956 #endif 4957 4958 #ifdef CONFIG_SLUB_DEBUG 4959 static void validate_slab(struct kmem_cache *s, struct page *page, 4960 unsigned long *obj_map) 4961 { 4962 void *p; 4963 void *addr = page_address(page); 4964 unsigned long flags; 4965 4966 slab_lock(page, &flags); 4967 4968 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) 4969 goto unlock; 4970 4971 /* Now we know that a valid freelist exists */ 4972 __fill_map(obj_map, s, page); 4973 for_each_object(p, s, addr, page->objects) { 4974 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 4975 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 4976 4977 if (!check_object(s, page, p, val)) 4978 break; 4979 } 4980 unlock: 4981 slab_unlock(page, &flags); 4982 } 4983 4984 static int validate_slab_node(struct kmem_cache *s, 4985 struct kmem_cache_node *n, unsigned long *obj_map) 4986 { 4987 unsigned long count = 0; 4988 struct page *page; 4989 unsigned long flags; 4990 4991 spin_lock_irqsave(&n->list_lock, flags); 4992 4993 list_for_each_entry(page, &n->partial, slab_list) { 4994 validate_slab(s, page, obj_map); 4995 count++; 4996 } 4997 if (count != n->nr_partial) { 4998 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 4999 s->name, count, n->nr_partial); 5000 slab_add_kunit_errors(); 5001 } 5002 5003 if (!(s->flags & SLAB_STORE_USER)) 5004 goto out; 5005 5006 list_for_each_entry(page, &n->full, slab_list) { 5007 validate_slab(s, page, obj_map); 5008 count++; 5009 } 5010 if (count != atomic_long_read(&n->nr_slabs)) { 5011 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5012 s->name, count, atomic_long_read(&n->nr_slabs)); 5013 slab_add_kunit_errors(); 5014 } 5015 5016 out: 5017 spin_unlock_irqrestore(&n->list_lock, flags); 5018 return count; 5019 } 5020 5021 long validate_slab_cache(struct kmem_cache *s) 5022 { 5023 int node; 5024 unsigned long count = 0; 5025 struct kmem_cache_node *n; 5026 unsigned long *obj_map; 5027 5028 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 5029 if (!obj_map) 5030 return -ENOMEM; 5031 5032 flush_all(s); 5033 for_each_kmem_cache_node(s, node, n) 5034 count += validate_slab_node(s, n, obj_map); 5035 5036 bitmap_free(obj_map); 5037 5038 return count; 5039 } 5040 EXPORT_SYMBOL(validate_slab_cache); 5041 5042 #ifdef CONFIG_DEBUG_FS 5043 /* 5044 * Generate lists of code addresses where slabcache objects are allocated 5045 * and freed. 
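 *
 * These lists feed the alloc_traces and free_traces files in the per-cache
 * debugfs directory (typically /sys/kernel/debug/slab/<cache>/). As an
 * illustration only (symbol and values are workload dependent), a line
 * emitted by slab_debugfs_show() might look like:
 *
 *     120 kmem_cache_alloc+0x9a/0x1b0 age=53/2753/7504 pid=1-713 cpus=0-3 nodes=0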
5046 */ 5047 5048 struct location { 5049 unsigned long count; 5050 unsigned long addr; 5051 long long sum_time; 5052 long min_time; 5053 long max_time; 5054 long min_pid; 5055 long max_pid; 5056 DECLARE_BITMAP(cpus, NR_CPUS); 5057 nodemask_t nodes; 5058 }; 5059 5060 struct loc_track { 5061 unsigned long max; 5062 unsigned long count; 5063 struct location *loc; 5064 }; 5065 5066 static struct dentry *slab_debugfs_root; 5067 5068 static void free_loc_track(struct loc_track *t) 5069 { 5070 if (t->max) 5071 free_pages((unsigned long)t->loc, 5072 get_order(sizeof(struct location) * t->max)); 5073 } 5074 5075 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 5076 { 5077 struct location *l; 5078 int order; 5079 5080 order = get_order(sizeof(struct location) * max); 5081 5082 l = (void *)__get_free_pages(flags, order); 5083 if (!l) 5084 return 0; 5085 5086 if (t->count) { 5087 memcpy(l, t->loc, sizeof(struct location) * t->count); 5088 free_loc_track(t); 5089 } 5090 t->max = max; 5091 t->loc = l; 5092 return 1; 5093 } 5094 5095 static int add_location(struct loc_track *t, struct kmem_cache *s, 5096 const struct track *track) 5097 { 5098 long start, end, pos; 5099 struct location *l; 5100 unsigned long caddr; 5101 unsigned long age = jiffies - track->when; 5102 5103 start = -1; 5104 end = t->count; 5105 5106 for ( ; ; ) { 5107 pos = start + (end - start + 1) / 2; 5108 5109 /* 5110 * There is nothing at "end". If we end up there 5111 * we need to add something to before end. 5112 */ 5113 if (pos == end) 5114 break; 5115 5116 caddr = t->loc[pos].addr; 5117 if (track->addr == caddr) { 5118 5119 l = &t->loc[pos]; 5120 l->count++; 5121 if (track->when) { 5122 l->sum_time += age; 5123 if (age < l->min_time) 5124 l->min_time = age; 5125 if (age > l->max_time) 5126 l->max_time = age; 5127 5128 if (track->pid < l->min_pid) 5129 l->min_pid = track->pid; 5130 if (track->pid > l->max_pid) 5131 l->max_pid = track->pid; 5132 5133 cpumask_set_cpu(track->cpu, 5134 to_cpumask(l->cpus)); 5135 } 5136 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5137 return 1; 5138 } 5139 5140 if (track->addr < caddr) 5141 end = pos; 5142 else 5143 start = pos; 5144 } 5145 5146 /* 5147 * Not found. Insert new tracking element. 
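 * (The loop above is a binary search over the address-sorted array, so
 * pos is now the slot where the new entry must be placed to keep the
 * array sorted.)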
5148 */ 5149 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 5150 return 0; 5151 5152 l = t->loc + pos; 5153 if (pos < t->count) 5154 memmove(l + 1, l, 5155 (t->count - pos) * sizeof(struct location)); 5156 t->count++; 5157 l->count = 1; 5158 l->addr = track->addr; 5159 l->sum_time = age; 5160 l->min_time = age; 5161 l->max_time = age; 5162 l->min_pid = track->pid; 5163 l->max_pid = track->pid; 5164 cpumask_clear(to_cpumask(l->cpus)); 5165 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 5166 nodes_clear(l->nodes); 5167 node_set(page_to_nid(virt_to_page(track)), l->nodes); 5168 return 1; 5169 } 5170 5171 static void process_slab(struct loc_track *t, struct kmem_cache *s, 5172 struct page *page, enum track_item alloc, 5173 unsigned long *obj_map) 5174 { 5175 void *addr = page_address(page); 5176 void *p; 5177 5178 __fill_map(obj_map, s, page); 5179 5180 for_each_object(p, s, addr, page->objects) 5181 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 5182 add_location(t, s, get_track(s, p, alloc)); 5183 } 5184 #endif /* CONFIG_DEBUG_FS */ 5185 #endif /* CONFIG_SLUB_DEBUG */ 5186 5187 #ifdef CONFIG_SYSFS 5188 enum slab_stat_type { 5189 SL_ALL, /* All slabs */ 5190 SL_PARTIAL, /* Only partially allocated slabs */ 5191 SL_CPU, /* Only slabs used for cpu caches */ 5192 SL_OBJECTS, /* Determine allocated objects not slabs */ 5193 SL_TOTAL /* Determine object capacity not slabs */ 5194 }; 5195 5196 #define SO_ALL (1 << SL_ALL) 5197 #define SO_PARTIAL (1 << SL_PARTIAL) 5198 #define SO_CPU (1 << SL_CPU) 5199 #define SO_OBJECTS (1 << SL_OBJECTS) 5200 #define SO_TOTAL (1 << SL_TOTAL) 5201 5202 static ssize_t show_slab_objects(struct kmem_cache *s, 5203 char *buf, unsigned long flags) 5204 { 5205 unsigned long total = 0; 5206 int node; 5207 int x; 5208 unsigned long *nodes; 5209 int len = 0; 5210 5211 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 5212 if (!nodes) 5213 return -ENOMEM; 5214 5215 if (flags & SO_CPU) { 5216 int cpu; 5217 5218 for_each_possible_cpu(cpu) { 5219 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 5220 cpu); 5221 int node; 5222 struct page *page; 5223 5224 page = READ_ONCE(c->page); 5225 if (!page) 5226 continue; 5227 5228 node = page_to_nid(page); 5229 if (flags & SO_TOTAL) 5230 x = page->objects; 5231 else if (flags & SO_OBJECTS) 5232 x = page->inuse; 5233 else 5234 x = 1; 5235 5236 total += x; 5237 nodes[node] += x; 5238 5239 page = slub_percpu_partial_read_once(c); 5240 if (page) { 5241 node = page_to_nid(page); 5242 if (flags & SO_TOTAL) 5243 WARN_ON_ONCE(1); 5244 else if (flags & SO_OBJECTS) 5245 WARN_ON_ONCE(1); 5246 else 5247 x = page->pages; 5248 total += x; 5249 nodes[node] += x; 5250 } 5251 } 5252 } 5253 5254 /* 5255 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex" 5256 * already held which will conflict with an existing lock order: 5257 * 5258 * mem_hotplug_lock->slab_mutex->kernfs_mutex 5259 * 5260 * We don't really need mem_hotplug_lock (to hold off 5261 * slab_mem_going_offline_callback) here because slab's memory hot 5262 * unplug code doesn't destroy the kmem_cache->node[] data. 
5263 */ 5264 5265 #ifdef CONFIG_SLUB_DEBUG 5266 if (flags & SO_ALL) { 5267 struct kmem_cache_node *n; 5268 5269 for_each_kmem_cache_node(s, node, n) { 5270 5271 if (flags & SO_TOTAL) 5272 x = atomic_long_read(&n->total_objects); 5273 else if (flags & SO_OBJECTS) 5274 x = atomic_long_read(&n->total_objects) - 5275 count_partial(n, count_free); 5276 else 5277 x = atomic_long_read(&n->nr_slabs); 5278 total += x; 5279 nodes[node] += x; 5280 } 5281 5282 } else 5283 #endif 5284 if (flags & SO_PARTIAL) { 5285 struct kmem_cache_node *n; 5286 5287 for_each_kmem_cache_node(s, node, n) { 5288 if (flags & SO_TOTAL) 5289 x = count_partial(n, count_total); 5290 else if (flags & SO_OBJECTS) 5291 x = count_partial(n, count_inuse); 5292 else 5293 x = n->nr_partial; 5294 total += x; 5295 nodes[node] += x; 5296 } 5297 } 5298 5299 len += sysfs_emit_at(buf, len, "%lu", total); 5300 #ifdef CONFIG_NUMA 5301 for (node = 0; node < nr_node_ids; node++) { 5302 if (nodes[node]) 5303 len += sysfs_emit_at(buf, len, " N%d=%lu", 5304 node, nodes[node]); 5305 } 5306 #endif 5307 len += sysfs_emit_at(buf, len, "\n"); 5308 kfree(nodes); 5309 5310 return len; 5311 } 5312 5313 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 5314 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 5315 5316 struct slab_attribute { 5317 struct attribute attr; 5318 ssize_t (*show)(struct kmem_cache *s, char *buf); 5319 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 5320 }; 5321 5322 #define SLAB_ATTR_RO(_name) \ 5323 static struct slab_attribute _name##_attr = \ 5324 __ATTR(_name, 0400, _name##_show, NULL) 5325 5326 #define SLAB_ATTR(_name) \ 5327 static struct slab_attribute _name##_attr = \ 5328 __ATTR(_name, 0600, _name##_show, _name##_store) 5329 5330 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 5331 { 5332 return sysfs_emit(buf, "%u\n", s->size); 5333 } 5334 SLAB_ATTR_RO(slab_size); 5335 5336 static ssize_t align_show(struct kmem_cache *s, char *buf) 5337 { 5338 return sysfs_emit(buf, "%u\n", s->align); 5339 } 5340 SLAB_ATTR_RO(align); 5341 5342 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 5343 { 5344 return sysfs_emit(buf, "%u\n", s->object_size); 5345 } 5346 SLAB_ATTR_RO(object_size); 5347 5348 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 5349 { 5350 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 5351 } 5352 SLAB_ATTR_RO(objs_per_slab); 5353 5354 static ssize_t order_show(struct kmem_cache *s, char *buf) 5355 { 5356 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 5357 } 5358 SLAB_ATTR_RO(order); 5359 5360 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 5361 { 5362 return sysfs_emit(buf, "%lu\n", s->min_partial); 5363 } 5364 5365 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 5366 size_t length) 5367 { 5368 unsigned long min; 5369 int err; 5370 5371 err = kstrtoul(buf, 10, &min); 5372 if (err) 5373 return err; 5374 5375 set_min_partial(s, min); 5376 return length; 5377 } 5378 SLAB_ATTR(min_partial); 5379 5380 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 5381 { 5382 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s)); 5383 } 5384 5385 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 5386 size_t length) 5387 { 5388 unsigned int objects; 5389 int err; 5390 5391 err = kstrtouint(buf, 10, &objects); 5392 if (err) 5393 return err; 5394 if (objects && !kmem_cache_has_cpu_partial(s)) 5395 return -EINVAL; 5396 5397 slub_set_cpu_partial(s, objects); 
5398 flush_all(s); 5399 return length; 5400 } 5401 SLAB_ATTR(cpu_partial); 5402 5403 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 5404 { 5405 if (!s->ctor) 5406 return 0; 5407 return sysfs_emit(buf, "%pS\n", s->ctor); 5408 } 5409 SLAB_ATTR_RO(ctor); 5410 5411 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 5412 { 5413 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 5414 } 5415 SLAB_ATTR_RO(aliases); 5416 5417 static ssize_t partial_show(struct kmem_cache *s, char *buf) 5418 { 5419 return show_slab_objects(s, buf, SO_PARTIAL); 5420 } 5421 SLAB_ATTR_RO(partial); 5422 5423 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 5424 { 5425 return show_slab_objects(s, buf, SO_CPU); 5426 } 5427 SLAB_ATTR_RO(cpu_slabs); 5428 5429 static ssize_t objects_show(struct kmem_cache *s, char *buf) 5430 { 5431 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 5432 } 5433 SLAB_ATTR_RO(objects); 5434 5435 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 5436 { 5437 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 5438 } 5439 SLAB_ATTR_RO(objects_partial); 5440 5441 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 5442 { 5443 int objects = 0; 5444 int pages = 0; 5445 int cpu; 5446 int len = 0; 5447 5448 for_each_online_cpu(cpu) { 5449 struct page *page; 5450 5451 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5452 5453 if (page) { 5454 pages += page->pages; 5455 objects += page->pobjects; 5456 } 5457 } 5458 5459 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages); 5460 5461 #ifdef CONFIG_SMP 5462 for_each_online_cpu(cpu) { 5463 struct page *page; 5464 5465 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 5466 if (page) 5467 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 5468 cpu, page->pobjects, page->pages); 5469 } 5470 #endif 5471 len += sysfs_emit_at(buf, len, "\n"); 5472 5473 return len; 5474 } 5475 SLAB_ATTR_RO(slabs_cpu_partial); 5476 5477 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 5478 { 5479 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 5480 } 5481 SLAB_ATTR_RO(reclaim_account); 5482 5483 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 5484 { 5485 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 5486 } 5487 SLAB_ATTR_RO(hwcache_align); 5488 5489 #ifdef CONFIG_ZONE_DMA 5490 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 5491 { 5492 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 5493 } 5494 SLAB_ATTR_RO(cache_dma); 5495 #endif 5496 5497 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 5498 { 5499 return sysfs_emit(buf, "%u\n", s->usersize); 5500 } 5501 SLAB_ATTR_RO(usersize); 5502 5503 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 5504 { 5505 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 5506 } 5507 SLAB_ATTR_RO(destroy_by_rcu); 5508 5509 #ifdef CONFIG_SLUB_DEBUG 5510 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 5511 { 5512 return show_slab_objects(s, buf, SO_ALL); 5513 } 5514 SLAB_ATTR_RO(slabs); 5515 5516 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 5517 { 5518 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 5519 } 5520 SLAB_ATTR_RO(total_objects); 5521 5522 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 5523 { 5524 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 5525 } 5526 SLAB_ATTR_RO(sanity_checks); 5527 5528 
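/*
 * For illustration, SLAB_ATTR_RO(trace) below expands to
 *
 *	static struct slab_attribute trace_attr =
 *		__ATTR(trace, 0400, trace_show, NULL);
 *
 * i.e. a read-only sysfs attribute whose ->show callback is trace_show()
 * and which has no ->store callback.
 */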
static ssize_t trace_show(struct kmem_cache *s, char *buf) 5529 { 5530 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 5531 } 5532 SLAB_ATTR_RO(trace); 5533 5534 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 5535 { 5536 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 5537 } 5538 5539 SLAB_ATTR_RO(red_zone); 5540 5541 static ssize_t poison_show(struct kmem_cache *s, char *buf) 5542 { 5543 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 5544 } 5545 5546 SLAB_ATTR_RO(poison); 5547 5548 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 5549 { 5550 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 5551 } 5552 5553 SLAB_ATTR_RO(store_user); 5554 5555 static ssize_t validate_show(struct kmem_cache *s, char *buf) 5556 { 5557 return 0; 5558 } 5559 5560 static ssize_t validate_store(struct kmem_cache *s, 5561 const char *buf, size_t length) 5562 { 5563 int ret = -EINVAL; 5564 5565 if (buf[0] == '1') { 5566 ret = validate_slab_cache(s); 5567 if (ret >= 0) 5568 ret = length; 5569 } 5570 return ret; 5571 } 5572 SLAB_ATTR(validate); 5573 5574 #endif /* CONFIG_SLUB_DEBUG */ 5575 5576 #ifdef CONFIG_FAILSLAB 5577 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 5578 { 5579 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 5580 } 5581 SLAB_ATTR_RO(failslab); 5582 #endif 5583 5584 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 5585 { 5586 return 0; 5587 } 5588 5589 static ssize_t shrink_store(struct kmem_cache *s, 5590 const char *buf, size_t length) 5591 { 5592 if (buf[0] == '1') 5593 kmem_cache_shrink(s); 5594 else 5595 return -EINVAL; 5596 return length; 5597 } 5598 SLAB_ATTR(shrink); 5599 5600 #ifdef CONFIG_NUMA 5601 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 5602 { 5603 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 5604 } 5605 5606 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 5607 const char *buf, size_t length) 5608 { 5609 unsigned int ratio; 5610 int err; 5611 5612 err = kstrtouint(buf, 10, &ratio); 5613 if (err) 5614 return err; 5615 if (ratio > 100) 5616 return -ERANGE; 5617 5618 s->remote_node_defrag_ratio = ratio * 10; 5619 5620 return length; 5621 } 5622 SLAB_ATTR(remote_node_defrag_ratio); 5623 #endif 5624 5625 #ifdef CONFIG_SLUB_STATS 5626 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 5627 { 5628 unsigned long sum = 0; 5629 int cpu; 5630 int len = 0; 5631 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 5632 5633 if (!data) 5634 return -ENOMEM; 5635 5636 for_each_online_cpu(cpu) { 5637 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 5638 5639 data[cpu] = x; 5640 sum += x; 5641 } 5642 5643 len += sysfs_emit_at(buf, len, "%lu", sum); 5644 5645 #ifdef CONFIG_SMP 5646 for_each_online_cpu(cpu) { 5647 if (data[cpu]) 5648 len += sysfs_emit_at(buf, len, " C%d=%u", 5649 cpu, data[cpu]); 5650 } 5651 #endif 5652 kfree(data); 5653 len += sysfs_emit_at(buf, len, "\n"); 5654 5655 return len; 5656 } 5657 5658 static void clear_stat(struct kmem_cache *s, enum stat_item si) 5659 { 5660 int cpu; 5661 5662 for_each_online_cpu(cpu) 5663 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 5664 } 5665 5666 #define STAT_ATTR(si, text) \ 5667 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 5668 { \ 5669 return show_stat(s, buf, si); \ 5670 } \ 5671 static ssize_t text##_store(struct kmem_cache *s, \ 5672 const char *buf, size_t length) \ 5673 { \ 5674 if (buf[0] != '0') 
\ 5675 return -EINVAL; \ 5676 clear_stat(s, si); \ 5677 return length; \ 5678 } \ 5679 SLAB_ATTR(text); \ 5680 5681 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 5682 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 5683 STAT_ATTR(FREE_FASTPATH, free_fastpath); 5684 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 5685 STAT_ATTR(FREE_FROZEN, free_frozen); 5686 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 5687 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 5688 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 5689 STAT_ATTR(ALLOC_SLAB, alloc_slab); 5690 STAT_ATTR(ALLOC_REFILL, alloc_refill); 5691 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 5692 STAT_ATTR(FREE_SLAB, free_slab); 5693 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 5694 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 5695 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 5696 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 5697 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 5698 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 5699 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 5700 STAT_ATTR(ORDER_FALLBACK, order_fallback); 5701 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 5702 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 5703 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 5704 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 5705 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 5706 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 5707 #endif /* CONFIG_SLUB_STATS */ 5708 5709 static struct attribute *slab_attrs[] = { 5710 &slab_size_attr.attr, 5711 &object_size_attr.attr, 5712 &objs_per_slab_attr.attr, 5713 &order_attr.attr, 5714 &min_partial_attr.attr, 5715 &cpu_partial_attr.attr, 5716 &objects_attr.attr, 5717 &objects_partial_attr.attr, 5718 &partial_attr.attr, 5719 &cpu_slabs_attr.attr, 5720 &ctor_attr.attr, 5721 &aliases_attr.attr, 5722 &align_attr.attr, 5723 &hwcache_align_attr.attr, 5724 &reclaim_account_attr.attr, 5725 &destroy_by_rcu_attr.attr, 5726 &shrink_attr.attr, 5727 &slabs_cpu_partial_attr.attr, 5728 #ifdef CONFIG_SLUB_DEBUG 5729 &total_objects_attr.attr, 5730 &slabs_attr.attr, 5731 &sanity_checks_attr.attr, 5732 &trace_attr.attr, 5733 &red_zone_attr.attr, 5734 &poison_attr.attr, 5735 &store_user_attr.attr, 5736 &validate_attr.attr, 5737 #endif 5738 #ifdef CONFIG_ZONE_DMA 5739 &cache_dma_attr.attr, 5740 #endif 5741 #ifdef CONFIG_NUMA 5742 &remote_node_defrag_ratio_attr.attr, 5743 #endif 5744 #ifdef CONFIG_SLUB_STATS 5745 &alloc_fastpath_attr.attr, 5746 &alloc_slowpath_attr.attr, 5747 &free_fastpath_attr.attr, 5748 &free_slowpath_attr.attr, 5749 &free_frozen_attr.attr, 5750 &free_add_partial_attr.attr, 5751 &free_remove_partial_attr.attr, 5752 &alloc_from_partial_attr.attr, 5753 &alloc_slab_attr.attr, 5754 &alloc_refill_attr.attr, 5755 &alloc_node_mismatch_attr.attr, 5756 &free_slab_attr.attr, 5757 &cpuslab_flush_attr.attr, 5758 &deactivate_full_attr.attr, 5759 &deactivate_empty_attr.attr, 5760 &deactivate_to_head_attr.attr, 5761 &deactivate_to_tail_attr.attr, 5762 &deactivate_remote_frees_attr.attr, 5763 &deactivate_bypass_attr.attr, 5764 &order_fallback_attr.attr, 5765 &cmpxchg_double_fail_attr.attr, 5766 &cmpxchg_double_cpu_fail_attr.attr, 5767 &cpu_partial_alloc_attr.attr, 5768 &cpu_partial_free_attr.attr, 5769 &cpu_partial_node_attr.attr, 5770 &cpu_partial_drain_attr.attr, 5771 #endif 5772 #ifdef CONFIG_FAILSLAB 5773 &failslab_attr.attr, 5774 #endif 5775 &usersize_attr.attr, 5776 5777 NULL 5778 }; 5779 5780 static const struct attribute_group slab_attr_group = { 5781 .attrs 
= slab_attrs, 5782 }; 5783 5784 static ssize_t slab_attr_show(struct kobject *kobj, 5785 struct attribute *attr, 5786 char *buf) 5787 { 5788 struct slab_attribute *attribute; 5789 struct kmem_cache *s; 5790 int err; 5791 5792 attribute = to_slab_attr(attr); 5793 s = to_slab(kobj); 5794 5795 if (!attribute->show) 5796 return -EIO; 5797 5798 err = attribute->show(s, buf); 5799 5800 return err; 5801 } 5802 5803 static ssize_t slab_attr_store(struct kobject *kobj, 5804 struct attribute *attr, 5805 const char *buf, size_t len) 5806 { 5807 struct slab_attribute *attribute; 5808 struct kmem_cache *s; 5809 int err; 5810 5811 attribute = to_slab_attr(attr); 5812 s = to_slab(kobj); 5813 5814 if (!attribute->store) 5815 return -EIO; 5816 5817 err = attribute->store(s, buf, len); 5818 return err; 5819 } 5820 5821 static void kmem_cache_release(struct kobject *k) 5822 { 5823 slab_kmem_cache_release(to_slab(k)); 5824 } 5825 5826 static const struct sysfs_ops slab_sysfs_ops = { 5827 .show = slab_attr_show, 5828 .store = slab_attr_store, 5829 }; 5830 5831 static struct kobj_type slab_ktype = { 5832 .sysfs_ops = &slab_sysfs_ops, 5833 .release = kmem_cache_release, 5834 }; 5835 5836 static struct kset *slab_kset; 5837 5838 static inline struct kset *cache_kset(struct kmem_cache *s) 5839 { 5840 return slab_kset; 5841 } 5842 5843 #define ID_STR_LENGTH 64 5844 5845 /* Create a unique string id for a slab cache: 5846 * 5847 * Format :[flags-]size 5848 */ 5849 static char *create_unique_id(struct kmem_cache *s) 5850 { 5851 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 5852 char *p = name; 5853 5854 BUG_ON(!name); 5855 5856 *p++ = ':'; 5857 /* 5858 * First flags affecting slabcache operations. We will only 5859 * get here for aliasable slabs so we do not need to support 5860 * too many flags. The flags here must cover all flags that 5861 * are matched during merging to guarantee that the id is 5862 * unique. 5863 */ 5864 if (s->flags & SLAB_CACHE_DMA) 5865 *p++ = 'd'; 5866 if (s->flags & SLAB_CACHE_DMA32) 5867 *p++ = 'D'; 5868 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5869 *p++ = 'a'; 5870 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5871 *p++ = 'F'; 5872 if (s->flags & SLAB_ACCOUNT) 5873 *p++ = 'A'; 5874 if (p != name + 1) 5875 *p++ = '-'; 5876 p += sprintf(p, "%07u", s->size); 5877 5878 BUG_ON(p > name + ID_STR_LENGTH - 1); 5879 return name; 5880 } 5881 5882 static int sysfs_slab_add(struct kmem_cache *s) 5883 { 5884 int err; 5885 const char *name; 5886 struct kset *kset = cache_kset(s); 5887 int unmergeable = slab_unmergeable(s); 5888 5889 if (!kset) { 5890 kobject_init(&s->kobj, &slab_ktype); 5891 return 0; 5892 } 5893 5894 if (!unmergeable && disable_higher_order_debug && 5895 (slub_debug & DEBUG_METADATA_FLAGS)) 5896 unmergeable = 1; 5897 5898 if (unmergeable) { 5899 /* 5900 * Slabcache can never be merged so we can use the name proper. 5901 * This is typically the case for debug situations. In that 5902 * case we can catch duplicate names easily. 5903 */ 5904 sysfs_remove_link(&slab_kset->kobj, s->name); 5905 name = s->name; 5906 } else { 5907 /* 5908 * Create a unique name for the slab as a target 5909 * for the symlinks. 
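 * For example, create_unique_id() would turn a 192 byte SLAB_ACCOUNT
 * cache into ":A-0000192".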
5910 */ 5911 name = create_unique_id(s); 5912 } 5913 5914 s->kobj.kset = kset; 5915 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 5916 if (err) 5917 goto out; 5918 5919 err = sysfs_create_group(&s->kobj, &slab_attr_group); 5920 if (err) 5921 goto out_del_kobj; 5922 5923 if (!unmergeable) { 5924 /* Setup first alias */ 5925 sysfs_slab_alias(s, s->name); 5926 } 5927 out: 5928 if (!unmergeable) 5929 kfree(name); 5930 return err; 5931 out_del_kobj: 5932 kobject_del(&s->kobj); 5933 goto out; 5934 } 5935 5936 void sysfs_slab_unlink(struct kmem_cache *s) 5937 { 5938 if (slab_state >= FULL) 5939 kobject_del(&s->kobj); 5940 } 5941 5942 void sysfs_slab_release(struct kmem_cache *s) 5943 { 5944 if (slab_state >= FULL) 5945 kobject_put(&s->kobj); 5946 } 5947 5948 /* 5949 * Need to buffer aliases during bootup until sysfs becomes 5950 * available lest we lose that information. 5951 */ 5952 struct saved_alias { 5953 struct kmem_cache *s; 5954 const char *name; 5955 struct saved_alias *next; 5956 }; 5957 5958 static struct saved_alias *alias_list; 5959 5960 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 5961 { 5962 struct saved_alias *al; 5963 5964 if (slab_state == FULL) { 5965 /* 5966 * If we have a leftover link then remove it. 5967 */ 5968 sysfs_remove_link(&slab_kset->kobj, name); 5969 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 5970 } 5971 5972 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 5973 if (!al) 5974 return -ENOMEM; 5975 5976 al->s = s; 5977 al->name = name; 5978 al->next = alias_list; 5979 alias_list = al; 5980 return 0; 5981 } 5982 5983 static int __init slab_sysfs_init(void) 5984 { 5985 struct kmem_cache *s; 5986 int err; 5987 5988 mutex_lock(&slab_mutex); 5989 5990 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 5991 if (!slab_kset) { 5992 mutex_unlock(&slab_mutex); 5993 pr_err("Cannot register slab subsystem.\n"); 5994 return -ENOSYS; 5995 } 5996 5997 slab_state = FULL; 5998 5999 list_for_each_entry(s, &slab_caches, list) { 6000 err = sysfs_slab_add(s); 6001 if (err) 6002 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 6003 s->name); 6004 } 6005 6006 while (alias_list) { 6007 struct saved_alias *al = alias_list; 6008 6009 alias_list = alias_list->next; 6010 err = sysfs_slab_alias(al->s, al->name); 6011 if (err) 6012 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 6013 al->name); 6014 kfree(al); 6015 } 6016 6017 mutex_unlock(&slab_mutex); 6018 return 0; 6019 } 6020 6021 __initcall(slab_sysfs_init); 6022 #endif /* CONFIG_SYSFS */ 6023 6024 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 6025 static int slab_debugfs_show(struct seq_file *seq, void *v) 6026 { 6027 6028 struct location *l; 6029 unsigned int idx = *(unsigned int *)v; 6030 struct loc_track *t = seq->private; 6031 6032 if (idx < t->count) { 6033 l = &t->loc[idx]; 6034 6035 seq_printf(seq, "%7ld ", l->count); 6036 6037 if (l->addr) 6038 seq_printf(seq, "%pS", (void *)l->addr); 6039 else 6040 seq_puts(seq, "<not-available>"); 6041 6042 if (l->sum_time != l->min_time) { 6043 seq_printf(seq, " age=%ld/%llu/%ld", 6044 l->min_time, div_u64(l->sum_time, l->count), 6045 l->max_time); 6046 } else 6047 seq_printf(seq, " age=%ld", l->min_time); 6048 6049 if (l->min_pid != l->max_pid) 6050 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 6051 else 6052 seq_printf(seq, " pid=%ld", 6053 l->min_pid); 6054 6055 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 6056 seq_printf(seq, " cpus=%*pbl", 6057 
cpumask_pr_args(to_cpumask(l->cpus))); 6058 6059 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 6060 seq_printf(seq, " nodes=%*pbl", 6061 nodemask_pr_args(&l->nodes)); 6062 6063 seq_puts(seq, "\n"); 6064 } 6065 6066 if (!idx && !t->count) 6067 seq_puts(seq, "No data\n"); 6068 6069 return 0; 6070 } 6071 6072 static void slab_debugfs_stop(struct seq_file *seq, void *v) 6073 { 6074 } 6075 6076 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 6077 { 6078 struct loc_track *t = seq->private; 6079 6080 v = ppos; 6081 ++*ppos; 6082 if (*ppos <= t->count) 6083 return v; 6084 6085 return NULL; 6086 } 6087 6088 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 6089 { 6090 return ppos; 6091 } 6092 6093 static const struct seq_operations slab_debugfs_sops = { 6094 .start = slab_debugfs_start, 6095 .next = slab_debugfs_next, 6096 .stop = slab_debugfs_stop, 6097 .show = slab_debugfs_show, 6098 }; 6099 6100 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 6101 { 6102 6103 struct kmem_cache_node *n; 6104 enum track_item alloc; 6105 int node; 6106 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 6107 sizeof(struct loc_track)); 6108 struct kmem_cache *s = file_inode(filep)->i_private; 6109 unsigned long *obj_map; 6110 6111 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 6112 if (!obj_map) 6113 return -ENOMEM; 6114 6115 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 6116 alloc = TRACK_ALLOC; 6117 else 6118 alloc = TRACK_FREE; 6119 6120 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 6121 bitmap_free(obj_map); 6122 return -ENOMEM; 6123 } 6124 6125 for_each_kmem_cache_node(s, node, n) { 6126 unsigned long flags; 6127 struct page *page; 6128 6129 if (!atomic_long_read(&n->nr_slabs)) 6130 continue; 6131 6132 spin_lock_irqsave(&n->list_lock, flags); 6133 list_for_each_entry(page, &n->partial, slab_list) 6134 process_slab(t, s, page, alloc, obj_map); 6135 list_for_each_entry(page, &n->full, slab_list) 6136 process_slab(t, s, page, alloc, obj_map); 6137 spin_unlock_irqrestore(&n->list_lock, flags); 6138 } 6139 6140 bitmap_free(obj_map); 6141 return 0; 6142 } 6143 6144 static int slab_debug_trace_release(struct inode *inode, struct file *file) 6145 { 6146 struct seq_file *seq = file->private_data; 6147 struct loc_track *t = seq->private; 6148 6149 free_loc_track(t); 6150 return seq_release_private(inode, file); 6151 } 6152 6153 static const struct file_operations slab_debugfs_fops = { 6154 .open = slab_debug_trace_open, 6155 .read = seq_read, 6156 .llseek = seq_lseek, 6157 .release = slab_debug_trace_release, 6158 }; 6159 6160 static void debugfs_slab_add(struct kmem_cache *s) 6161 { 6162 struct dentry *slab_cache_dir; 6163 6164 if (unlikely(!slab_debugfs_root)) 6165 return; 6166 6167 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 6168 6169 debugfs_create_file("alloc_traces", 0400, 6170 slab_cache_dir, s, &slab_debugfs_fops); 6171 6172 debugfs_create_file("free_traces", 0400, 6173 slab_cache_dir, s, &slab_debugfs_fops); 6174 } 6175 6176 void debugfs_slab_release(struct kmem_cache *s) 6177 { 6178 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); 6179 } 6180 6181 static int __init slab_debugfs_init(void) 6182 { 6183 struct kmem_cache *s; 6184 6185 slab_debugfs_root = debugfs_create_dir("slab", NULL); 6186 6187 list_for_each_entry(s, &slab_caches, list) 6188 if (s->flags & SLAB_STORE_USER) 6189 debugfs_slab_add(s); 6190 6191 return 0; 
6192 6193 } 6194 __initcall(slab_debugfs_init); 6195 #endif 6196 /* 6197 * The /proc/slabinfo ABI 6198 */ 6199 #ifdef CONFIG_SLUB_DEBUG 6200 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 6201 { 6202 unsigned long nr_slabs = 0; 6203 unsigned long nr_objs = 0; 6204 unsigned long nr_free = 0; 6205 int node; 6206 struct kmem_cache_node *n; 6207 6208 for_each_kmem_cache_node(s, node, n) { 6209 nr_slabs += node_nr_slabs(n); 6210 nr_objs += node_nr_objs(n); 6211 nr_free += count_partial(n, count_free); 6212 } 6213 6214 sinfo->active_objs = nr_objs - nr_free; 6215 sinfo->num_objs = nr_objs; 6216 sinfo->active_slabs = nr_slabs; 6217 sinfo->num_slabs = nr_slabs; 6218 sinfo->objects_per_slab = oo_objects(s->oo); 6219 sinfo->cache_order = oo_order(s->oo); 6220 } 6221 6222 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) 6223 { 6224 } 6225 6226 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 6227 size_t count, loff_t *ppos) 6228 { 6229 return -EIO; 6230 } 6231 #endif /* CONFIG_SLUB_DEBUG */ 6232