// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */
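/*
 * For illustration only (this block is not part of the original file): a
 * typical consumer of this allocator creates one cache per object type and
 * then allocates and frees individual objects from it.  "struct foo" and
 * "foo_cachep" below are hypothetical names.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 *
 * As the rules above note, an object must be passed back to
 * kmem_cache_free() in the same initialized state that the constructor
 * (if any) established.
 */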

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct slab *slab,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x, y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x, y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline void *index_to_obj(struct kmem_cache *cache,
				 const struct slab *slab, unsigned int idx)
{
	return slab->s_mem + cache->size * idx;
}

#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider the alignment of the freelist because
	 * the freelist will be at the end of the slab page. The objects will
	 * be at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
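/*
 * Worked example (illustrative only, not part of the original file),
 * assuming 4 KiB pages and a one-byte freelist_idx_t: for 128-byte objects
 * with the freelist index array kept on the slab, an order-0 slab yields
 * 4096 / (128 + 1) = 31 objects with 4096 - 31 * 129 = 97 left-over bytes;
 * with an off-slab or object-embedded freelist the same slab yields
 * 4096 / 128 = 32 objects with no left-over bytes.
 */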

#if DEBUG
#define	slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct slab *slab, void *objp)
{
	struct kmem_cache_node *n;
	int slab_node;
	LIST_HEAD(list);

	slab_node = slab_nid(slab);
	n = get_node(cachep, slab_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, slab_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

/* &alien->lock must be held by alien callers. */
static __always_inline void __free_one(struct array_cache *ac, void *objp)
{
	/* Avoid trivial double-free. */
	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
		return;
	ac->entry[ac->avail++] = objp;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		kmemleak_no_scan(alc);
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int slab_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[slab_node]) {
		alien = n->alien[slab_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, slab_node, &list);
		}
		__free_one(ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, slab_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, slab_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int slab_node = slab_nid(virt_to_slab(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == slab_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, slab_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for cpu before we can
	 * begin anything. Make sure some other cpu on this
	 * node has not already allocated this
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex provides sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes a kmem_cache_node for each slab cache on the
 * given node, used for either memory or cpu hotplug.  If memory is being
 * hot-added, the kmem_cache_node will be allocated off-node since memory is
 * not yet online for the new node.  When hotplugging memory or a cpu,
 * existing nodes are not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_rcu().
	 */
	if (old_shared && force_change)
		synchronize_rcu();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		free_block(cachep, nc->entry, nc->avail, node, &list);
		nc->avail = 0;

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs; now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 * a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The kmem_cache_node structure is usually allocated from
 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

#if defined(CONFIG_NUMA)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, INT_MAX);

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node structures for caches whose
 * buffer_size is the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN, 0, 0);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
				kmalloc_info[INDEX_NODE].size,
				ARCH_KMALLOC_FLAGS, 0,
				kmalloc_info[INDEX_NODE].size);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct folio *folio;
	struct slab *slab;

	flags |= cachep->allocflags;

	folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!folio) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	slab = folio_slab(folio);

	account_slab(slab, cachep->gfporder, cachep, flags);
	__folio_set_slab(folio);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
		slab_set_pfmemalloc(slab);

	return slab;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
{
	int order = cachep->gfporder;
	struct folio *folio = slab_folio(slab);

	BUG_ON(!folio_test_slab(folio));
	__slab_clear_pfmemalloc(slab);
	__folio_clear_slab(folio);
	page_mapcount_reset(folio_page(folio, 0));
	folio->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	unaccount_slab(slab, order, cachep);
	__free_pages(folio_page(folio, 0), order);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct slab *slab;

	slab = container_of(head, struct slab, rcu_head);
	cachep = slab->slab_cache;

	kmem_freepages(cachep, slab);
}

#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	__kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map) {}

#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER)
		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct slab *slab = virt_to_slab(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, slab, objp);
		if (objnr) {
			objp = index_to_obj(cachep, slab, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, slab, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct slab *slab)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, slab->freelist - obj_offset(cachep),
			POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slab, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct slab *slab)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slab: slab being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling, the slab must have been unlinked from the cache.  The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
{
	void *freelist;

	freelist = slab->freelist;
	slab_destroy_debugcheck(cachep, slab);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&slab->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, slab);

	/*
	 * From now on, we don't use freelist
	 * although actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kfree(freelist);
}

/*
 * Update the size of the caches before calling slabs_destroy as it may
 * recursively call kfree.
 */
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct slab *slab, *n;

	list_for_each_entry_safe(slab, n, list, slab_list) {
		list_del(&slab->slab_list);
		slab_destroy(cachep, slab);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 *
 * Return: number of left-over bytes in a slab
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, slab_flags_t flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;
			size_t freelist_cache_size;

			freelist_size = num * sizeof(freelist_idx_t);
			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
			} else {
				freelist_cache = kmalloc_slab(freelist_size, 0u);
				if (!freelist_cache)
					continue;
				freelist_cache_size = freelist_cache->size;

				/*
				 * Needed to avoid possible looping condition
				 * in cache_grow_begin()
				 */
				if (OFF_SLAB(freelist_cache))
					continue;
			}

			/* check if off slab has enough benefit */
			if (freelist_cache_size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
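/*
 * Illustrative walk-through (not part of the original file), assuming 4 KiB
 * pages, a one-byte freelist_idx_t, an on-slab freelist and a slab_max_order
 * that allows order 1: for 700-byte objects an order-0 slab holds
 * 4096 / 701 = 5 objects with 591 bytes left over; 591 * 8 > 4096, so the
 * loop above moves on to order 1, where 8192 / 701 = 11 objects leave
 * 481 bytes over.  Both the slab_max_order cap and the 1/8 internal
 * fragmentation bound now stop the search, so the cache uses order 1 with
 * 11 objects per slab.
 */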

static struct array_cache __percpu *alloc_kmem_cache_cpus(
		struct kmem_cache *cachep, int entries, int batchcount)
{
	int cpu;
	size_t size;
	struct array_cache __percpu *cpu_cache;

	size = sizeof(void *) * entries + sizeof(struct array_cache);
	cpu_cache = __alloc_percpu(size, sizeof(void *));

	if (!cpu_cache)
		return NULL;

	for_each_possible_cpu(cpu) {
		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
				entries, batchcount);
	}

	return cpu_cache;
}

static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (slab_state >= FULL)
		return enable_cpucache(cachep, gfp);

	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
	if (!cachep->cpu_cache)
		return 1;

	if (slab_state == DOWN) {
		/* Creation of first cache (kmem_cache). */
		set_up_node(kmem_cache, CACHE_CACHE);
	} else if (slab_state == PARTIAL) {
		/* For kmem_cache_node */
		set_up_node(cachep, SIZE_NODE);
	} else {
		int node;

		for_each_online_node(node) {
			cachep->node[node] = kmalloc_node(
				sizeof(struct kmem_cache_node), gfp, node);
			BUG_ON(!cachep->node[node]);
			kmem_cache_node_init(cachep->node[node]);
		}
	}

	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}

struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache *cachep;

	cachep = find_mergeable(size, align, flags, name, ctor);
	if (cachep) {
		cachep->refcount++;

		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		cachep->object_size = max_t(int, cachep->object_size, size);
	}
	return cachep;
}

static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * If slab auto-initialization on free is enabled, store the freelist
	 * off-slab, so that its contents don't end up in one of the allocated
	 * objects.
	 */
	if (unlikely(slab_want_init_on_free(cachep)))
		return false;

	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	left = calculate_slab_order(cachep, size,
			flags | CFLGS_OBJFREELIST_SLAB);
	if (!cachep->num)
		return false;

	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_off_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	/*
	 * Always use on-slab management when SLAB_NOLEAKTRACE
	 * to avoid recursive calls into kmemleak.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		return false;

	/*
	 * Size is large, assume best to place the slab management obj
	 * off-slab (should allow better packing of objs).
	 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the created cache or %NULL in case of error
 */
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
	size_t ralign = BYTES_PER_WORD;
	gfp_t gfp;
	int err;
	unsigned int size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
This is needed to avoid 1941 * unaligned accesses for some archs when redzoning is used, and makes 1942 * sure any on-slab bufctl's are also correctly aligned. 1943 */ 1944 size = ALIGN(size, BYTES_PER_WORD); 1945 1946 if (flags & SLAB_RED_ZONE) { 1947 ralign = REDZONE_ALIGN; 1948 /* If redzoning, ensure that the second redzone is suitably 1949 * aligned, by adjusting the object size accordingly. */ 1950 size = ALIGN(size, REDZONE_ALIGN); 1951 } 1952 1953 /* 3) caller mandated alignment */ 1954 if (ralign < cachep->align) { 1955 ralign = cachep->align; 1956 } 1957 /* disable debug if necessary */ 1958 if (ralign > __alignof__(unsigned long long)) 1959 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 1960 /* 1961 * 4) Store it. 1962 */ 1963 cachep->align = ralign; 1964 cachep->colour_off = cache_line_size(); 1965 /* Offset must be a multiple of the alignment. */ 1966 if (cachep->colour_off < cachep->align) 1967 cachep->colour_off = cachep->align; 1968 1969 if (slab_is_available()) 1970 gfp = GFP_KERNEL; 1971 else 1972 gfp = GFP_NOWAIT; 1973 1974 #if DEBUG 1975 1976 /* 1977 * Both debugging options require word-alignment which is calculated 1978 * into align above. 1979 */ 1980 if (flags & SLAB_RED_ZONE) { 1981 /* add space for red zone words */ 1982 cachep->obj_offset += sizeof(unsigned long long); 1983 size += 2 * sizeof(unsigned long long); 1984 } 1985 if (flags & SLAB_STORE_USER) { 1986 /* user store requires one word storage behind the end of 1987 * the real object. But if the second red zone needs to be 1988 * aligned to 64 bits, we must allow that much space. 1989 */ 1990 if (flags & SLAB_RED_ZONE) 1991 size += REDZONE_ALIGN; 1992 else 1993 size += BYTES_PER_WORD; 1994 } 1995 #endif 1996 1997 kasan_cache_create(cachep, &size, &flags); 1998 1999 size = ALIGN(size, cachep->align); 2000 /* 2001 * We must restrict the number of objects in a slab so that a byte-sized 2002 * freelist index is sufficient. See the comment on the SLAB_OBJ_MIN_SIZE definition. 2003 */ 2004 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) 2005 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); 2006 2007 #if DEBUG 2008 /* 2009 * To activate debug pagealloc, off-slab freelist management is a necessary 2010 * requirement. In the early phase of initialization, the small caches needed 2011 * to hold an off-slab freelist are not yet set up, so it would not be possible. 2012 * Checking size >= 256 guarantees that all the necessary small 2013 * caches have been initialized by this point in the slab initialization sequence.
2014 */ 2015 if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) && 2016 size >= 256 && cachep->object_size > cache_line_size()) { 2017 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { 2018 size_t tmp_size = ALIGN(size, PAGE_SIZE); 2019 2020 if (set_off_slab_cache(cachep, tmp_size, flags)) { 2021 flags |= CFLGS_OFF_SLAB; 2022 cachep->obj_offset += tmp_size - size; 2023 size = tmp_size; 2024 goto done; 2025 } 2026 } 2027 } 2028 #endif 2029 2030 if (set_objfreelist_slab_cache(cachep, size, flags)) { 2031 flags |= CFLGS_OBJFREELIST_SLAB; 2032 goto done; 2033 } 2034 2035 if (set_off_slab_cache(cachep, size, flags)) { 2036 flags |= CFLGS_OFF_SLAB; 2037 goto done; 2038 } 2039 2040 if (set_on_slab_cache(cachep, size, flags)) 2041 goto done; 2042 2043 return -E2BIG; 2044 2045 done: 2046 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); 2047 cachep->flags = flags; 2048 cachep->allocflags = __GFP_COMP; 2049 if (flags & SLAB_CACHE_DMA) 2050 cachep->allocflags |= GFP_DMA; 2051 if (flags & SLAB_CACHE_DMA32) 2052 cachep->allocflags |= GFP_DMA32; 2053 if (flags & SLAB_RECLAIM_ACCOUNT) 2054 cachep->allocflags |= __GFP_RECLAIMABLE; 2055 cachep->size = size; 2056 cachep->reciprocal_buffer_size = reciprocal_value(size); 2057 2058 #if DEBUG 2059 /* 2060 * If we're going to use the generic kernel_map_pages() 2061 * poisoning, then it's going to smash the contents of 2062 * the redzone and userword anyhow, so switch them off. 2063 */ 2064 if (IS_ENABLED(CONFIG_PAGE_POISONING) && 2065 (cachep->flags & SLAB_POISON) && 2066 is_debug_pagealloc_cache(cachep)) 2067 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2068 #endif 2069 2070 err = setup_cpu_cache(cachep, gfp); 2071 if (err) { 2072 __kmem_cache_release(cachep); 2073 return err; 2074 } 2075 2076 return 0; 2077 } 2078 2079 #if DEBUG 2080 static void check_irq_off(void) 2081 { 2082 BUG_ON(!irqs_disabled()); 2083 } 2084 2085 static void check_irq_on(void) 2086 { 2087 BUG_ON(irqs_disabled()); 2088 } 2089 2090 static void check_mutex_acquired(void) 2091 { 2092 BUG_ON(!mutex_is_locked(&slab_mutex)); 2093 } 2094 2095 static void check_spinlock_acquired(struct kmem_cache *cachep) 2096 { 2097 #ifdef CONFIG_SMP 2098 check_irq_off(); 2099 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); 2100 #endif 2101 } 2102 2103 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2104 { 2105 #ifdef CONFIG_SMP 2106 check_irq_off(); 2107 assert_spin_locked(&get_node(cachep, node)->list_lock); 2108 #endif 2109 } 2110 2111 #else 2112 #define check_irq_off() do { } while(0) 2113 #define check_irq_on() do { } while(0) 2114 #define check_mutex_acquired() do { } while(0) 2115 #define check_spinlock_acquired(x) do { } while(0) 2116 #define check_spinlock_acquired_node(x, y) do { } while(0) 2117 #endif 2118 2119 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, 2120 int node, bool free_all, struct list_head *list) 2121 { 2122 int tofree; 2123 2124 if (!ac || !ac->avail) 2125 return; 2126 2127 tofree = free_all ? 
ac->avail : (ac->limit + 4) / 5; 2128 if (tofree > ac->avail) 2129 tofree = (ac->avail + 1) / 2; 2130 2131 free_block(cachep, ac->entry, tofree, node, list); 2132 ac->avail -= tofree; 2133 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); 2134 } 2135 2136 static void do_drain(void *arg) 2137 { 2138 struct kmem_cache *cachep = arg; 2139 struct array_cache *ac; 2140 int node = numa_mem_id(); 2141 struct kmem_cache_node *n; 2142 LIST_HEAD(list); 2143 2144 check_irq_off(); 2145 ac = cpu_cache_get(cachep); 2146 n = get_node(cachep, node); 2147 spin_lock(&n->list_lock); 2148 free_block(cachep, ac->entry, ac->avail, node, &list); 2149 spin_unlock(&n->list_lock); 2150 ac->avail = 0; 2151 slabs_destroy(cachep, &list); 2152 } 2153 2154 static void drain_cpu_caches(struct kmem_cache *cachep) 2155 { 2156 struct kmem_cache_node *n; 2157 int node; 2158 LIST_HEAD(list); 2159 2160 on_each_cpu(do_drain, cachep, 1); 2161 check_irq_on(); 2162 for_each_kmem_cache_node(cachep, node, n) 2163 if (n->alien) 2164 drain_alien_cache(cachep, n->alien); 2165 2166 for_each_kmem_cache_node(cachep, node, n) { 2167 spin_lock_irq(&n->list_lock); 2168 drain_array_locked(cachep, n->shared, node, true, &list); 2169 spin_unlock_irq(&n->list_lock); 2170 2171 slabs_destroy(cachep, &list); 2172 } 2173 } 2174 2175 /* 2176 * Remove slabs from the list of free slabs. 2177 * Specify the number of slabs to drain in tofree. 2178 * 2179 * Returns the actual number of slabs released. 2180 */ 2181 static int drain_freelist(struct kmem_cache *cache, 2182 struct kmem_cache_node *n, int tofree) 2183 { 2184 struct list_head *p; 2185 int nr_freed; 2186 struct slab *slab; 2187 2188 nr_freed = 0; 2189 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2190 2191 spin_lock_irq(&n->list_lock); 2192 p = n->slabs_free.prev; 2193 if (p == &n->slabs_free) { 2194 spin_unlock_irq(&n->list_lock); 2195 goto out; 2196 } 2197 2198 slab = list_entry(p, struct slab, slab_list); 2199 list_del(&slab->slab_list); 2200 n->free_slabs--; 2201 n->total_slabs--; 2202 /* 2203 * Safe to drop the lock. The slab is no longer linked 2204 * to the cache. 2205 */ 2206 n->free_objects -= cache->num; 2207 spin_unlock_irq(&n->list_lock); 2208 slab_destroy(cache, slab); 2209 nr_freed++; 2210 } 2211 out: 2212 return nr_freed; 2213 } 2214 2215 bool __kmem_cache_empty(struct kmem_cache *s) 2216 { 2217 int node; 2218 struct kmem_cache_node *n; 2219 2220 for_each_kmem_cache_node(s, node, n) 2221 if (!list_empty(&n->slabs_full) || 2222 !list_empty(&n->slabs_partial)) 2223 return false; 2224 return true; 2225 } 2226 2227 int __kmem_cache_shrink(struct kmem_cache *cachep) 2228 { 2229 int ret = 0; 2230 int node; 2231 struct kmem_cache_node *n; 2232 2233 drain_cpu_caches(cachep); 2234 2235 check_irq_on(); 2236 for_each_kmem_cache_node(cachep, node, n) { 2237 drain_freelist(cachep, n, INT_MAX); 2238 2239 ret += !list_empty(&n->slabs_full) || 2240 !list_empty(&n->slabs_partial); 2241 } 2242 return (ret ? 
1 : 0); 2243 } 2244 2245 int __kmem_cache_shutdown(struct kmem_cache *cachep) 2246 { 2247 return __kmem_cache_shrink(cachep); 2248 } 2249 2250 void __kmem_cache_release(struct kmem_cache *cachep) 2251 { 2252 int i; 2253 struct kmem_cache_node *n; 2254 2255 cache_random_seq_destroy(cachep); 2256 2257 free_percpu(cachep->cpu_cache); 2258 2259 /* NUMA: free the node structures */ 2260 for_each_kmem_cache_node(cachep, i, n) { 2261 kfree(n->shared); 2262 free_alien_cache(n->alien); 2263 kfree(n); 2264 cachep->node[i] = NULL; 2265 } 2266 } 2267 2268 /* 2269 * Get the memory for a slab management obj. 2270 * 2271 * For a slab cache when the slab descriptor is off-slab, the 2272 * slab descriptor can't come from the same cache which is being created, 2273 * Because if it is the case, that means we defer the creation of 2274 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point. 2275 * And we eventually call down to __kmem_cache_create(), which 2276 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one. 2277 * This is a "chicken-and-egg" problem. 2278 * 2279 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches, 2280 * which are all initialized during kmem_cache_init(). 2281 */ 2282 static void *alloc_slabmgmt(struct kmem_cache *cachep, 2283 struct slab *slab, int colour_off, 2284 gfp_t local_flags, int nodeid) 2285 { 2286 void *freelist; 2287 void *addr = slab_address(slab); 2288 2289 slab->s_mem = addr + colour_off; 2290 slab->active = 0; 2291 2292 if (OBJFREELIST_SLAB(cachep)) 2293 freelist = NULL; 2294 else if (OFF_SLAB(cachep)) { 2295 /* Slab management obj is off-slab. */ 2296 freelist = kmalloc_node(cachep->freelist_size, 2297 local_flags, nodeid); 2298 } else { 2299 /* We will use last bytes at the slab for freelist */ 2300 freelist = addr + (PAGE_SIZE << cachep->gfporder) - 2301 cachep->freelist_size; 2302 } 2303 2304 return freelist; 2305 } 2306 2307 static inline freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx) 2308 { 2309 return ((freelist_idx_t *) slab->freelist)[idx]; 2310 } 2311 2312 static inline void set_free_obj(struct slab *slab, 2313 unsigned int idx, freelist_idx_t val) 2314 { 2315 ((freelist_idx_t *)(slab->freelist))[idx] = val; 2316 } 2317 2318 static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) 2319 { 2320 #if DEBUG 2321 int i; 2322 2323 for (i = 0; i < cachep->num; i++) { 2324 void *objp = index_to_obj(cachep, slab, i); 2325 2326 if (cachep->flags & SLAB_STORE_USER) 2327 *dbg_userword(cachep, objp) = NULL; 2328 2329 if (cachep->flags & SLAB_RED_ZONE) { 2330 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2331 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2332 } 2333 /* 2334 * Constructors are not allowed to allocate memory from the same 2335 * cache which they are a constructor for. Otherwise, deadlock. 2336 * They must also be threaded. 2337 */ 2338 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { 2339 kasan_unpoison_object_data(cachep, 2340 objp + obj_offset(cachep)); 2341 cachep->ctor(objp + obj_offset(cachep)); 2342 kasan_poison_object_data( 2343 cachep, objp + obj_offset(cachep)); 2344 } 2345 2346 if (cachep->flags & SLAB_RED_ZONE) { 2347 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2348 slab_error(cachep, "constructor overwrote the end of an object"); 2349 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2350 slab_error(cachep, "constructor overwrote the start of an object"); 2351 } 2352 /* need to poison the objs? 
*/ 2353 if (cachep->flags & SLAB_POISON) { 2354 poison_obj(cachep, objp, POISON_FREE); 2355 slab_kernel_map(cachep, objp, 0); 2356 } 2357 } 2358 #endif 2359 } 2360 2361 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2362 /* Hold information during a freelist initialization */ 2363 union freelist_init_state { 2364 struct { 2365 unsigned int pos; 2366 unsigned int *list; 2367 unsigned int count; 2368 }; 2369 struct rnd_state rnd_state; 2370 }; 2371 2372 /* 2373 * Initialize the state based on the randomization method available. 2374 * return true if the pre-computed list is available, false otherwise. 2375 */ 2376 static bool freelist_state_initialize(union freelist_init_state *state, 2377 struct kmem_cache *cachep, 2378 unsigned int count) 2379 { 2380 bool ret; 2381 unsigned int rand; 2382 2383 /* Use best entropy available to define a random shift */ 2384 rand = get_random_u32(); 2385 2386 /* Use a random state if the pre-computed list is not available */ 2387 if (!cachep->random_seq) { 2388 prandom_seed_state(&state->rnd_state, rand); 2389 ret = false; 2390 } else { 2391 state->list = cachep->random_seq; 2392 state->count = count; 2393 state->pos = rand % count; 2394 ret = true; 2395 } 2396 return ret; 2397 } 2398 2399 /* Get the next entry on the list and randomize it using a random shift */ 2400 static freelist_idx_t next_random_slot(union freelist_init_state *state) 2401 { 2402 if (state->pos >= state->count) 2403 state->pos = 0; 2404 return state->list[state->pos++]; 2405 } 2406 2407 /* Swap two freelist entries */ 2408 static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b) 2409 { 2410 swap(((freelist_idx_t *) slab->freelist)[a], 2411 ((freelist_idx_t *) slab->freelist)[b]); 2412 } 2413 2414 /* 2415 * Shuffle the freelist initialization state based on pre-computed lists. 2416 * return true if the list was successfully shuffled, false otherwise. 2417 */ 2418 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) 2419 { 2420 unsigned int objfreelist = 0, i, rand, count = cachep->num; 2421 union freelist_init_state state; 2422 bool precomputed; 2423 2424 if (count < 2) 2425 return false; 2426 2427 precomputed = freelist_state_initialize(&state, cachep, count); 2428 2429 /* Take a random entry as the objfreelist */ 2430 if (OBJFREELIST_SLAB(cachep)) { 2431 if (!precomputed) 2432 objfreelist = count - 1; 2433 else 2434 objfreelist = next_random_slot(&state); 2435 slab->freelist = index_to_obj(cachep, slab, objfreelist) + 2436 obj_offset(cachep); 2437 count--; 2438 } 2439 2440 /* 2441 * On early boot, generate the list dynamically. 2442 * Later use a pre-computed list for speed. 
2443 */ 2444 if (!precomputed) { 2445 for (i = 0; i < count; i++) 2446 set_free_obj(slab, i, i); 2447 2448 /* Fisher-Yates shuffle */ 2449 for (i = count - 1; i > 0; i--) { 2450 rand = prandom_u32_state(&state.rnd_state); 2451 rand %= (i + 1); 2452 swap_free_obj(slab, i, rand); 2453 } 2454 } else { 2455 for (i = 0; i < count; i++) 2456 set_free_obj(slab, i, next_random_slot(&state)); 2457 } 2458 2459 if (OBJFREELIST_SLAB(cachep)) 2460 set_free_obj(slab, cachep->num - 1, objfreelist); 2461 2462 return true; 2463 } 2464 #else 2465 static inline bool shuffle_freelist(struct kmem_cache *cachep, 2466 struct slab *slab) 2467 { 2468 return false; 2469 } 2470 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2471 2472 static void cache_init_objs(struct kmem_cache *cachep, 2473 struct slab *slab) 2474 { 2475 int i; 2476 void *objp; 2477 bool shuffled; 2478 2479 cache_init_objs_debug(cachep, slab); 2480 2481 /* Try to randomize the freelist if enabled */ 2482 shuffled = shuffle_freelist(cachep, slab); 2483 2484 if (!shuffled && OBJFREELIST_SLAB(cachep)) { 2485 slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) + 2486 obj_offset(cachep); 2487 } 2488 2489 for (i = 0; i < cachep->num; i++) { 2490 objp = index_to_obj(cachep, slab, i); 2491 objp = kasan_init_slab_obj(cachep, objp); 2492 2493 /* constructor could break poison info */ 2494 if (DEBUG == 0 && cachep->ctor) { 2495 kasan_unpoison_object_data(cachep, objp); 2496 cachep->ctor(objp); 2497 kasan_poison_object_data(cachep, objp); 2498 } 2499 2500 if (!shuffled) 2501 set_free_obj(slab, i, i); 2502 } 2503 } 2504 2505 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab) 2506 { 2507 void *objp; 2508 2509 objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active)); 2510 slab->active++; 2511 2512 return objp; 2513 } 2514 2515 static void slab_put_obj(struct kmem_cache *cachep, 2516 struct slab *slab, void *objp) 2517 { 2518 unsigned int objnr = obj_to_index(cachep, slab, objp); 2519 #if DEBUG 2520 unsigned int i; 2521 2522 /* Verify double free bug */ 2523 for (i = slab->active; i < cachep->num; i++) { 2524 if (get_free_obj(slab, i) == objnr) { 2525 pr_err("slab: double free detected in cache '%s', objp %px\n", 2526 cachep->name, objp); 2527 BUG(); 2528 } 2529 } 2530 #endif 2531 slab->active--; 2532 if (!slab->freelist) 2533 slab->freelist = objp + obj_offset(cachep); 2534 2535 set_free_obj(slab, slab->active, objnr); 2536 } 2537 2538 /* 2539 * Grow (by 1) the number of slabs within a cache. This is called by 2540 * kmem_cache_alloc() when there are no active objs left in a cache. 2541 */ 2542 static struct slab *cache_grow_begin(struct kmem_cache *cachep, 2543 gfp_t flags, int nodeid) 2544 { 2545 void *freelist; 2546 size_t offset; 2547 gfp_t local_flags; 2548 int slab_node; 2549 struct kmem_cache_node *n; 2550 struct slab *slab; 2551 2552 /* 2553 * Be lazy and only check for valid flags here, keeping it out of the 2554 * critical path in kmem_cache_alloc(). 2555 */ 2556 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2557 flags = kmalloc_fix_flags(flags); 2558 2559 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 2560 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2561 2562 check_irq_off(); 2563 if (gfpflags_allow_blocking(local_flags)) 2564 local_irq_enable(); 2565 2566 /* 2567 * Get mem for the objs. Attempt to allocate a physical page from 2568 * 'nodeid'. 
2569 */ 2570 slab = kmem_getpages(cachep, local_flags, nodeid); 2571 if (!slab) 2572 goto failed; 2573 2574 slab_node = slab_nid(slab); 2575 n = get_node(cachep, slab_node); 2576 2577 /* Get colour for the slab, and cal the next value. */ 2578 n->colour_next++; 2579 if (n->colour_next >= cachep->colour) 2580 n->colour_next = 0; 2581 2582 offset = n->colour_next; 2583 if (offset >= cachep->colour) 2584 offset = 0; 2585 2586 offset *= cachep->colour_off; 2587 2588 /* 2589 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so 2590 * page_address() in the latter returns a non-tagged pointer, 2591 * as it should be for slab pages. 2592 */ 2593 kasan_poison_slab(slab); 2594 2595 /* Get slab management. */ 2596 freelist = alloc_slabmgmt(cachep, slab, offset, 2597 local_flags & ~GFP_CONSTRAINT_MASK, slab_node); 2598 if (OFF_SLAB(cachep) && !freelist) 2599 goto opps1; 2600 2601 slab->slab_cache = cachep; 2602 slab->freelist = freelist; 2603 2604 cache_init_objs(cachep, slab); 2605 2606 if (gfpflags_allow_blocking(local_flags)) 2607 local_irq_disable(); 2608 2609 return slab; 2610 2611 opps1: 2612 kmem_freepages(cachep, slab); 2613 failed: 2614 if (gfpflags_allow_blocking(local_flags)) 2615 local_irq_disable(); 2616 return NULL; 2617 } 2618 2619 static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab) 2620 { 2621 struct kmem_cache_node *n; 2622 void *list = NULL; 2623 2624 check_irq_off(); 2625 2626 if (!slab) 2627 return; 2628 2629 INIT_LIST_HEAD(&slab->slab_list); 2630 n = get_node(cachep, slab_nid(slab)); 2631 2632 spin_lock(&n->list_lock); 2633 n->total_slabs++; 2634 if (!slab->active) { 2635 list_add_tail(&slab->slab_list, &n->slabs_free); 2636 n->free_slabs++; 2637 } else 2638 fixup_slab_list(cachep, n, slab, &list); 2639 2640 STATS_INC_GROWN(cachep); 2641 n->free_objects += cachep->num - slab->active; 2642 spin_unlock(&n->list_lock); 2643 2644 fixup_objfreelist_debug(cachep, &list); 2645 } 2646 2647 #if DEBUG 2648 2649 /* 2650 * Perform extra freeing checks: 2651 * - detect bad pointers. 2652 * - POISON/RED_ZONE checking 2653 */ 2654 static void kfree_debugcheck(const void *objp) 2655 { 2656 if (!virt_addr_valid(objp)) { 2657 pr_err("kfree_debugcheck: out of range ptr %lxh\n", 2658 (unsigned long)objp); 2659 BUG(); 2660 } 2661 } 2662 2663 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2664 { 2665 unsigned long long redzone1, redzone2; 2666 2667 redzone1 = *dbg_redzone1(cache, obj); 2668 redzone2 = *dbg_redzone2(cache, obj); 2669 2670 /* 2671 * Redzone is ok. 
2672 */ 2673 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2674 return; 2675 2676 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2677 slab_error(cache, "double free detected"); 2678 else 2679 slab_error(cache, "memory outside object was overwritten"); 2680 2681 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 2682 obj, redzone1, redzone2); 2683 } 2684 2685 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2686 unsigned long caller) 2687 { 2688 unsigned int objnr; 2689 struct slab *slab; 2690 2691 BUG_ON(virt_to_cache(objp) != cachep); 2692 2693 objp -= obj_offset(cachep); 2694 kfree_debugcheck(objp); 2695 slab = virt_to_slab(objp); 2696 2697 if (cachep->flags & SLAB_RED_ZONE) { 2698 verify_redzone_free(cachep, objp); 2699 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2700 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2701 } 2702 if (cachep->flags & SLAB_STORE_USER) 2703 *dbg_userword(cachep, objp) = (void *)caller; 2704 2705 objnr = obj_to_index(cachep, slab, objp); 2706 2707 BUG_ON(objnr >= cachep->num); 2708 BUG_ON(objp != index_to_obj(cachep, slab, objnr)); 2709 2710 if (cachep->flags & SLAB_POISON) { 2711 poison_obj(cachep, objp, POISON_FREE); 2712 slab_kernel_map(cachep, objp, 0); 2713 } 2714 return objp; 2715 } 2716 2717 #else 2718 #define kfree_debugcheck(x) do { } while(0) 2719 #define cache_free_debugcheck(x, objp, z) (objp) 2720 #endif 2721 2722 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, 2723 void **list) 2724 { 2725 #if DEBUG 2726 void *next = *list; 2727 void *objp; 2728 2729 while (next) { 2730 objp = next - obj_offset(cachep); 2731 next = *(void **)next; 2732 poison_obj(cachep, objp, POISON_FREE); 2733 } 2734 #endif 2735 } 2736 2737 static inline void fixup_slab_list(struct kmem_cache *cachep, 2738 struct kmem_cache_node *n, struct slab *slab, 2739 void **list) 2740 { 2741 /* move slabp to correct slabp list: */ 2742 list_del(&slab->slab_list); 2743 if (slab->active == cachep->num) { 2744 list_add(&slab->slab_list, &n->slabs_full); 2745 if (OBJFREELIST_SLAB(cachep)) { 2746 #if DEBUG 2747 /* Poisoning will be done without holding the lock */ 2748 if (cachep->flags & SLAB_POISON) { 2749 void **objp = slab->freelist; 2750 2751 *objp = *list; 2752 *list = objp; 2753 } 2754 #endif 2755 slab->freelist = NULL; 2756 } 2757 } else 2758 list_add(&slab->slab_list, &n->slabs_partial); 2759 } 2760 2761 /* Try to find non-pfmemalloc slab if needed */ 2762 static noinline struct slab *get_valid_first_slab(struct kmem_cache_node *n, 2763 struct slab *slab, bool pfmemalloc) 2764 { 2765 if (!slab) 2766 return NULL; 2767 2768 if (pfmemalloc) 2769 return slab; 2770 2771 if (!slab_test_pfmemalloc(slab)) 2772 return slab; 2773 2774 /* No need to keep pfmemalloc slab if we have enough free objects */ 2775 if (n->free_objects > n->free_limit) { 2776 slab_clear_pfmemalloc(slab); 2777 return slab; 2778 } 2779 2780 /* Move pfmemalloc slab to the end of list to speed up next search */ 2781 list_del(&slab->slab_list); 2782 if (!slab->active) { 2783 list_add_tail(&slab->slab_list, &n->slabs_free); 2784 n->free_slabs++; 2785 } else 2786 list_add_tail(&slab->slab_list, &n->slabs_partial); 2787 2788 list_for_each_entry(slab, &n->slabs_partial, slab_list) { 2789 if (!slab_test_pfmemalloc(slab)) 2790 return slab; 2791 } 2792 2793 n->free_touched = 1; 2794 list_for_each_entry(slab, &n->slabs_free, slab_list) { 2795 if (!slab_test_pfmemalloc(slab)) { 2796 n->free_slabs--; 2797 return slab; 2798 } 2799 } 2800 2801 return NULL; 2802 } 2803 2804 
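/*
 * Illustrative sketch (not part of the original allocator): a minimal,
 * self-contained model of the freelist index scheme used by
 * slab_get_obj()/slab_put_obj() above. Entries of the index array at
 * positions >= 'active' name the free objects; allocation consumes the
 * entry at 'active' and advances it, while freeing steps 'active' back
 * and stores the returned object's index there. The struct and function
 * names below are made up for this example.
 */
struct example_freelist {
	unsigned char idx[16];	/* one object index per object in the slab */
	unsigned int active;	/* number of objects currently allocated */
};

static inline unsigned int example_get_obj(struct example_freelist *fl)
{
	/* caller must guarantee at least one free object */
	return fl->idx[fl->active++];
}

static inline void example_put_obj(struct example_freelist *fl,
				   unsigned int objnr)
{
	/* caller must guarantee at least one allocated object */
	fl->idx[--fl->active] = objnr;
}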
static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) 2805 { 2806 struct slab *slab; 2807 2808 assert_spin_locked(&n->list_lock); 2809 slab = list_first_entry_or_null(&n->slabs_partial, struct slab, 2810 slab_list); 2811 if (!slab) { 2812 n->free_touched = 1; 2813 slab = list_first_entry_or_null(&n->slabs_free, struct slab, 2814 slab_list); 2815 if (slab) 2816 n->free_slabs--; 2817 } 2818 2819 if (sk_memalloc_socks()) 2820 slab = get_valid_first_slab(n, slab, pfmemalloc); 2821 2822 return slab; 2823 } 2824 2825 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, 2826 struct kmem_cache_node *n, gfp_t flags) 2827 { 2828 struct slab *slab; 2829 void *obj; 2830 void *list = NULL; 2831 2832 if (!gfp_pfmemalloc_allowed(flags)) 2833 return NULL; 2834 2835 spin_lock(&n->list_lock); 2836 slab = get_first_slab(n, true); 2837 if (!slab) { 2838 spin_unlock(&n->list_lock); 2839 return NULL; 2840 } 2841 2842 obj = slab_get_obj(cachep, slab); 2843 n->free_objects--; 2844 2845 fixup_slab_list(cachep, n, slab, &list); 2846 2847 spin_unlock(&n->list_lock); 2848 fixup_objfreelist_debug(cachep, &list); 2849 2850 return obj; 2851 } 2852 2853 /* 2854 * Slab list should be fixed up by fixup_slab_list() for existing slab 2855 * or cache_grow_end() for new slab 2856 */ 2857 static __always_inline int alloc_block(struct kmem_cache *cachep, 2858 struct array_cache *ac, struct slab *slab, int batchcount) 2859 { 2860 /* 2861 * There must be at least one object available for 2862 * allocation. 2863 */ 2864 BUG_ON(slab->active >= cachep->num); 2865 2866 while (slab->active < cachep->num && batchcount--) { 2867 STATS_INC_ALLOCED(cachep); 2868 STATS_INC_ACTIVE(cachep); 2869 STATS_SET_HIGH(cachep); 2870 2871 ac->entry[ac->avail++] = slab_get_obj(cachep, slab); 2872 } 2873 2874 return batchcount; 2875 } 2876 2877 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2878 { 2879 int batchcount; 2880 struct kmem_cache_node *n; 2881 struct array_cache *ac, *shared; 2882 int node; 2883 void *list = NULL; 2884 struct slab *slab; 2885 2886 check_irq_off(); 2887 node = numa_mem_id(); 2888 2889 ac = cpu_cache_get(cachep); 2890 batchcount = ac->batchcount; 2891 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2892 /* 2893 * If there was little recent activity on this cache, then 2894 * perform only a partial refill. Otherwise we could generate 2895 * refill bouncing. 2896 */ 2897 batchcount = BATCHREFILL_LIMIT; 2898 } 2899 n = get_node(cachep, node); 2900 2901 BUG_ON(ac->avail > 0 || !n); 2902 shared = READ_ONCE(n->shared); 2903 if (!n->free_objects && (!shared || !shared->avail)) 2904 goto direct_grow; 2905 2906 spin_lock(&n->list_lock); 2907 shared = READ_ONCE(n->shared); 2908 2909 /* See if we can refill from the shared array */ 2910 if (shared && transfer_objects(ac, shared, batchcount)) { 2911 shared->touched = 1; 2912 goto alloc_done; 2913 } 2914 2915 while (batchcount > 0) { 2916 /* Get slab alloc is to come from. 
*/ 2917 slab = get_first_slab(n, false); 2918 if (!slab) 2919 goto must_grow; 2920 2921 check_spinlock_acquired(cachep); 2922 2923 batchcount = alloc_block(cachep, ac, slab, batchcount); 2924 fixup_slab_list(cachep, n, slab, &list); 2925 } 2926 2927 must_grow: 2928 n->free_objects -= ac->avail; 2929 alloc_done: 2930 spin_unlock(&n->list_lock); 2931 fixup_objfreelist_debug(cachep, &list); 2932 2933 direct_grow: 2934 if (unlikely(!ac->avail)) { 2935 /* Check if we can use obj in pfmemalloc slab */ 2936 if (sk_memalloc_socks()) { 2937 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); 2938 2939 if (obj) 2940 return obj; 2941 } 2942 2943 slab = cache_grow_begin(cachep, gfp_exact_node(flags), node); 2944 2945 /* 2946 * cache_grow_begin() can reenable interrupts, 2947 * then ac could change. 2948 */ 2949 ac = cpu_cache_get(cachep); 2950 if (!ac->avail && slab) 2951 alloc_block(cachep, ac, slab, batchcount); 2952 cache_grow_end(cachep, slab); 2953 2954 if (!ac->avail) 2955 return NULL; 2956 } 2957 ac->touched = 1; 2958 2959 return ac->entry[--ac->avail]; 2960 } 2961 2962 #if DEBUG 2963 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2964 gfp_t flags, void *objp, unsigned long caller) 2965 { 2966 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 2967 if (!objp || is_kfence_address(objp)) 2968 return objp; 2969 if (cachep->flags & SLAB_POISON) { 2970 check_poison_obj(cachep, objp); 2971 slab_kernel_map(cachep, objp, 1); 2972 poison_obj(cachep, objp, POISON_INUSE); 2973 } 2974 if (cachep->flags & SLAB_STORE_USER) 2975 *dbg_userword(cachep, objp) = (void *)caller; 2976 2977 if (cachep->flags & SLAB_RED_ZONE) { 2978 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 2979 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2980 slab_error(cachep, "double free, or memory outside object was overwritten"); 2981 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 2982 objp, *dbg_redzone1(cachep, objp), 2983 *dbg_redzone2(cachep, objp)); 2984 } 2985 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2986 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2987 } 2988 2989 objp += obj_offset(cachep); 2990 if (cachep->ctor && cachep->flags & SLAB_POISON) 2991 cachep->ctor(objp); 2992 if ((unsigned long)objp & (arch_slab_minalign() - 1)) { 2993 pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp, 2994 arch_slab_minalign()); 2995 } 2996 return objp; 2997 } 2998 #else 2999 #define cache_alloc_debugcheck_after(a, b, objp, d) (objp) 3000 #endif 3001 3002 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3003 { 3004 void *objp; 3005 struct array_cache *ac; 3006 3007 check_irq_off(); 3008 3009 ac = cpu_cache_get(cachep); 3010 if (likely(ac->avail)) { 3011 ac->touched = 1; 3012 objp = ac->entry[--ac->avail]; 3013 3014 STATS_INC_ALLOCHIT(cachep); 3015 goto out; 3016 } 3017 3018 STATS_INC_ALLOCMISS(cachep); 3019 objp = cache_alloc_refill(cachep, flags); 3020 /* 3021 * the 'ac' may be updated by cache_alloc_refill(), 3022 * and kmemleak_erase() requires its correct value. 3023 */ 3024 ac = cpu_cache_get(cachep); 3025 3026 out: 3027 /* 3028 * To avoid a false negative, if an object that is in one of the 3029 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 3030 * treat the array pointers as a reference to the object. 
3031 */ 3032 if (objp) 3033 kmemleak_erase(&ac->entry[ac->avail]); 3034 return objp; 3035 } 3036 3037 #ifdef CONFIG_NUMA 3038 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 3039 3040 /* 3041 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set. 3042 * 3043 * If we are in_interrupt, then process context, including cpusets and 3044 * mempolicy, may not apply and should not be used for allocation policy. 3045 */ 3046 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3047 { 3048 int nid_alloc, nid_here; 3049 3050 if (in_interrupt() || (flags & __GFP_THISNODE)) 3051 return NULL; 3052 nid_alloc = nid_here = numa_mem_id(); 3053 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3054 nid_alloc = cpuset_slab_spread_node(); 3055 else if (current->mempolicy) 3056 nid_alloc = mempolicy_slab_node(); 3057 if (nid_alloc != nid_here) 3058 return ____cache_alloc_node(cachep, flags, nid_alloc); 3059 return NULL; 3060 } 3061 3062 /* 3063 * Fallback function if there was no memory available and no objects on a 3064 * certain node and falling back is permitted. First we scan all the 3065 * available nodes for available objects. If that fails then we 3066 * perform an allocation without specifying a node. This allows the page 3067 * allocator to do its reclaim / fallback magic. We then insert the 3068 * slab into the proper nodelist and then allocate from it. 3069 */ 3070 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3071 { 3072 struct zonelist *zonelist; 3073 struct zoneref *z; 3074 struct zone *zone; 3075 enum zone_type highest_zoneidx = gfp_zone(flags); 3076 void *obj = NULL; 3077 struct slab *slab; 3078 int nid; 3079 unsigned int cpuset_mems_cookie; 3080 3081 if (flags & __GFP_THISNODE) 3082 return NULL; 3083 3084 retry_cpuset: 3085 cpuset_mems_cookie = read_mems_allowed_begin(); 3086 zonelist = node_zonelist(mempolicy_slab_node(), flags); 3087 3088 retry: 3089 /* 3090 * Look through allowed nodes for objects available 3091 * from existing per node queues. 3092 */ 3093 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 3094 nid = zone_to_nid(zone); 3095 3096 if (cpuset_zone_allowed(zone, flags) && 3097 get_node(cache, nid) && 3098 get_node(cache, nid)->free_objects) { 3099 obj = ____cache_alloc_node(cache, 3100 gfp_exact_node(flags), nid); 3101 if (obj) 3102 break; 3103 } 3104 } 3105 3106 if (!obj) { 3107 /* 3108 * This allocation will be performed within the constraints 3109 * of the current cpuset / memory policy requirements. 3110 * We may trigger various forms of reclaim on the allowed 3111 * set and go into memory reserves if necessary. 3112 */ 3113 slab = cache_grow_begin(cache, flags, numa_mem_id()); 3114 cache_grow_end(cache, slab); 3115 if (slab) { 3116 nid = slab_nid(slab); 3117 obj = ____cache_alloc_node(cache, 3118 gfp_exact_node(flags), nid); 3119 3120 /* 3121 * Another processor may allocate the objects in 3122 * the slab since we are not holding any locks.
3123 */ 3124 if (!obj) 3125 goto retry; 3126 } 3127 } 3128 3129 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) 3130 goto retry_cpuset; 3131 return obj; 3132 } 3133 3134 /* 3135 * An interface to enable slab creation on nodeid 3136 */ 3137 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3138 int nodeid) 3139 { 3140 struct slab *slab; 3141 struct kmem_cache_node *n; 3142 void *obj = NULL; 3143 void *list = NULL; 3144 3145 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); 3146 n = get_node(cachep, nodeid); 3147 BUG_ON(!n); 3148 3149 check_irq_off(); 3150 spin_lock(&n->list_lock); 3151 slab = get_first_slab(n, false); 3152 if (!slab) 3153 goto must_grow; 3154 3155 check_spinlock_acquired_node(cachep, nodeid); 3156 3157 STATS_INC_NODEALLOCS(cachep); 3158 STATS_INC_ACTIVE(cachep); 3159 STATS_SET_HIGH(cachep); 3160 3161 BUG_ON(slab->active == cachep->num); 3162 3163 obj = slab_get_obj(cachep, slab); 3164 n->free_objects--; 3165 3166 fixup_slab_list(cachep, n, slab, &list); 3167 3168 spin_unlock(&n->list_lock); 3169 fixup_objfreelist_debug(cachep, &list); 3170 return obj; 3171 3172 must_grow: 3173 spin_unlock(&n->list_lock); 3174 slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); 3175 if (slab) { 3176 /* This slab isn't counted yet so don't update free_objects */ 3177 obj = slab_get_obj(cachep, slab); 3178 } 3179 cache_grow_end(cachep, slab); 3180 3181 return obj ? obj : fallback_alloc(cachep, flags); 3182 } 3183 3184 static __always_inline void * 3185 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3186 { 3187 void *objp = NULL; 3188 int slab_node = numa_mem_id(); 3189 3190 if (nodeid == NUMA_NO_NODE) { 3191 if (current->mempolicy || cpuset_do_slab_mem_spread()) { 3192 objp = alternate_node_alloc(cachep, flags); 3193 if (objp) 3194 goto out; 3195 } 3196 /* 3197 * Use the locally cached objects if possible. 3198 * However ____cache_alloc does not allow fallback 3199 * to other nodes. It may fail while we still have 3200 * objects on other nodes available. 3201 */ 3202 objp = ____cache_alloc(cachep, flags); 3203 nodeid = slab_node; 3204 } else if (nodeid == slab_node) { 3205 objp = ____cache_alloc(cachep, flags); 3206 } else if (!get_node(cachep, nodeid)) { 3207 /* Node not bootstrapped yet */ 3208 objp = fallback_alloc(cachep, flags); 3209 goto out; 3210 } 3211 3212 /* 3213 * We may just have run out of memory on the local node. 
3214 * ____cache_alloc_node() knows how to locate memory on other nodes 3215 */ 3216 if (!objp) 3217 objp = ____cache_alloc_node(cachep, flags, nodeid); 3218 out: 3219 return objp; 3220 } 3221 #else 3222 3223 static __always_inline void * 3224 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused) 3225 { 3226 return ____cache_alloc(cachep, flags); 3227 } 3228 3229 #endif /* CONFIG_NUMA */ 3230 3231 static __always_inline void * 3232 slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, 3233 int nodeid, size_t orig_size, unsigned long caller) 3234 { 3235 unsigned long save_flags; 3236 void *objp; 3237 struct obj_cgroup *objcg = NULL; 3238 bool init = false; 3239 3240 flags &= gfp_allowed_mask; 3241 cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags); 3242 if (unlikely(!cachep)) 3243 return NULL; 3244 3245 objp = kfence_alloc(cachep, orig_size, flags); 3246 if (unlikely(objp)) 3247 goto out; 3248 3249 local_irq_save(save_flags); 3250 objp = __do_cache_alloc(cachep, flags, nodeid); 3251 local_irq_restore(save_flags); 3252 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3253 prefetchw(objp); 3254 init = slab_want_init_on_alloc(flags, cachep); 3255 3256 out: 3257 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init); 3258 return objp; 3259 } 3260 3261 static __always_inline void * 3262 slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, 3263 size_t orig_size, unsigned long caller) 3264 { 3265 return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size, 3266 caller); 3267 } 3268 3269 /* 3270 * Caller needs to acquire correct kmem_cache_node's list_lock 3271 * @list: List of detached free slabs should be freed by caller 3272 */ 3273 static void free_block(struct kmem_cache *cachep, void **objpp, 3274 int nr_objects, int node, struct list_head *list) 3275 { 3276 int i; 3277 struct kmem_cache_node *n = get_node(cachep, node); 3278 struct slab *slab; 3279 3280 n->free_objects += nr_objects; 3281 3282 for (i = 0; i < nr_objects; i++) { 3283 void *objp; 3284 struct slab *slab; 3285 3286 objp = objpp[i]; 3287 3288 slab = virt_to_slab(objp); 3289 list_del(&slab->slab_list); 3290 check_spinlock_acquired_node(cachep, node); 3291 slab_put_obj(cachep, slab, objp); 3292 STATS_DEC_ACTIVE(cachep); 3293 3294 /* fixup slab chains */ 3295 if (slab->active == 0) { 3296 list_add(&slab->slab_list, &n->slabs_free); 3297 n->free_slabs++; 3298 } else { 3299 /* Unconditionally move a slab to the end of the 3300 * partial list on free - maximum time for the 3301 * other objects to be freed, too. 
3302 */ 3303 list_add_tail(&slab->slab_list, &n->slabs_partial); 3304 } 3305 } 3306 3307 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { 3308 n->free_objects -= cachep->num; 3309 3310 slab = list_last_entry(&n->slabs_free, struct slab, slab_list); 3311 list_move(&slab->slab_list, list); 3312 n->free_slabs--; 3313 n->total_slabs--; 3314 } 3315 } 3316 3317 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3318 { 3319 int batchcount; 3320 struct kmem_cache_node *n; 3321 int node = numa_mem_id(); 3322 LIST_HEAD(list); 3323 3324 batchcount = ac->batchcount; 3325 3326 check_irq_off(); 3327 n = get_node(cachep, node); 3328 spin_lock(&n->list_lock); 3329 if (n->shared) { 3330 struct array_cache *shared_array = n->shared; 3331 int max = shared_array->limit - shared_array->avail; 3332 if (max) { 3333 if (batchcount > max) 3334 batchcount = max; 3335 memcpy(&(shared_array->entry[shared_array->avail]), 3336 ac->entry, sizeof(void *) * batchcount); 3337 shared_array->avail += batchcount; 3338 goto free_done; 3339 } 3340 } 3341 3342 free_block(cachep, ac->entry, batchcount, node, &list); 3343 free_done: 3344 #if STATS 3345 { 3346 int i = 0; 3347 struct slab *slab; 3348 3349 list_for_each_entry(slab, &n->slabs_free, slab_list) { 3350 BUG_ON(slab->active); 3351 3352 i++; 3353 } 3354 STATS_SET_FREEABLE(cachep, i); 3355 } 3356 #endif 3357 spin_unlock(&n->list_lock); 3358 ac->avail -= batchcount; 3359 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3360 slabs_destroy(cachep, &list); 3361 } 3362 3363 /* 3364 * Release an obj back to its cache. If the obj has a constructed state, it must 3365 * be in this state _before_ it is released. Called with disabled ints. 3366 */ 3367 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, 3368 unsigned long caller) 3369 { 3370 bool init; 3371 3372 memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1); 3373 3374 if (is_kfence_address(objp)) { 3375 kmemleak_free_recursive(objp, cachep->flags); 3376 __kfence_free(objp); 3377 return; 3378 } 3379 3380 /* 3381 * As memory initialization might be integrated into KASAN, 3382 * kasan_slab_free and initialization memset must be 3383 * kept together to avoid discrepancies in behavior. 3384 */ 3385 init = slab_want_init_on_free(cachep); 3386 if (init && !kasan_has_integrated_init()) 3387 memset(objp, 0, cachep->object_size); 3388 /* KASAN might put objp into memory quarantine, delaying its reuse. */ 3389 if (kasan_slab_free(cachep, objp, init)) 3390 return; 3391 3392 /* Use KCSAN to help debug racy use-after-free. */ 3393 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU)) 3394 __kcsan_check_access(objp, cachep->object_size, 3395 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 3396 3397 ___cache_free(cachep, objp, caller); 3398 } 3399 3400 void ___cache_free(struct kmem_cache *cachep, void *objp, 3401 unsigned long caller) 3402 { 3403 struct array_cache *ac = cpu_cache_get(cachep); 3404 3405 check_irq_off(); 3406 kmemleak_free_recursive(objp, cachep->flags); 3407 objp = cache_free_debugcheck(cachep, objp, caller); 3408 3409 /* 3410 * Skip calling cache_free_alien() when the platform is not numa. 3411 * This will avoid cache misses that happen while accessing slabp (which 3412 * is per page memory reference) to get nodeid. Instead use a global 3413 * variable to skip the call, which is mostly likely to be present in 3414 * the cache. 
3415 */ 3416 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) 3417 return; 3418 3419 if (ac->avail < ac->limit) { 3420 STATS_INC_FREEHIT(cachep); 3421 } else { 3422 STATS_INC_FREEMISS(cachep); 3423 cache_flusharray(cachep, ac); 3424 } 3425 3426 if (sk_memalloc_socks()) { 3427 struct slab *slab = virt_to_slab(objp); 3428 3429 if (unlikely(slab_test_pfmemalloc(slab))) { 3430 cache_free_pfmemalloc(cachep, slab, objp); 3431 return; 3432 } 3433 } 3434 3435 __free_one(ac, objp); 3436 } 3437 3438 static __always_inline 3439 void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, 3440 gfp_t flags) 3441 { 3442 void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_); 3443 3444 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, NUMA_NO_NODE); 3445 3446 return ret; 3447 } 3448 3449 /** 3450 * kmem_cache_alloc - Allocate an object 3451 * @cachep: The cache to allocate from. 3452 * @flags: See kmalloc(). 3453 * 3454 * Allocate an object from this cache. The flags are only relevant 3455 * if the cache has no available objects. 3456 * 3457 * Return: pointer to the new object or %NULL in case of error 3458 */ 3459 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3460 { 3461 return __kmem_cache_alloc_lru(cachep, NULL, flags); 3462 } 3463 EXPORT_SYMBOL(kmem_cache_alloc); 3464 3465 void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, 3466 gfp_t flags) 3467 { 3468 return __kmem_cache_alloc_lru(cachep, lru, flags); 3469 } 3470 EXPORT_SYMBOL(kmem_cache_alloc_lru); 3471 3472 static __always_inline void 3473 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, 3474 size_t size, void **p, unsigned long caller) 3475 { 3476 size_t i; 3477 3478 for (i = 0; i < size; i++) 3479 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); 3480 } 3481 3482 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3483 void **p) 3484 { 3485 size_t i; 3486 struct obj_cgroup *objcg = NULL; 3487 3488 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); 3489 if (!s) 3490 return 0; 3491 3492 local_irq_disable(); 3493 for (i = 0; i < size; i++) { 3494 void *objp = kfence_alloc(s, s->object_size, flags) ?: 3495 __do_cache_alloc(s, flags, NUMA_NO_NODE); 3496 3497 if (unlikely(!objp)) 3498 goto error; 3499 p[i] = objp; 3500 } 3501 local_irq_enable(); 3502 3503 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); 3504 3505 /* 3506 * memcg and kmem_cache debug support and memory initialization. 3507 * Done outside of the IRQ disabled section. 3508 */ 3509 slab_post_alloc_hook(s, objcg, flags, size, p, 3510 slab_want_init_on_alloc(flags, s)); 3511 /* FIXME: Trace call missing. Christoph would like a bulk variant */ 3512 return size; 3513 error: 3514 local_irq_enable(); 3515 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); 3516 slab_post_alloc_hook(s, objcg, flags, i, p, false); 3517 kmem_cache_free_bulk(s, i, p); 3518 return 0; 3519 } 3520 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3521 3522 /** 3523 * kmem_cache_alloc_node - Allocate an object on the specified node 3524 * @cachep: The cache to allocate from. 3525 * @flags: See kmalloc(). 3526 * @nodeid: node number of the target node. 3527 * 3528 * Identical to kmem_cache_alloc but it will allocate memory on the given 3529 * node, which can improve the performance for cpu bound structures. 3530 * 3531 * Fallback to other node is possible if __GFP_THISNODE is not set. 
3532 * 3533 * Return: pointer to the new object or %NULL in case of error 3534 */ 3535 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3536 { 3537 void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_); 3538 3539 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, nodeid); 3540 3541 return ret; 3542 } 3543 EXPORT_SYMBOL(kmem_cache_alloc_node); 3544 3545 void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3546 int nodeid, size_t orig_size, 3547 unsigned long caller) 3548 { 3549 return slab_alloc_node(cachep, NULL, flags, nodeid, 3550 orig_size, caller); 3551 } 3552 3553 #ifdef CONFIG_PRINTK 3554 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 3555 { 3556 struct kmem_cache *cachep; 3557 unsigned int objnr; 3558 void *objp; 3559 3560 kpp->kp_ptr = object; 3561 kpp->kp_slab = slab; 3562 cachep = slab->slab_cache; 3563 kpp->kp_slab_cache = cachep; 3564 objp = object - obj_offset(cachep); 3565 kpp->kp_data_offset = obj_offset(cachep); 3566 slab = virt_to_slab(objp); 3567 objnr = obj_to_index(cachep, slab, objp); 3568 objp = index_to_obj(cachep, slab, objnr); 3569 kpp->kp_objp = objp; 3570 if (DEBUG && cachep->flags & SLAB_STORE_USER) 3571 kpp->kp_ret = *dbg_userword(cachep, objp); 3572 } 3573 #endif 3574 3575 static __always_inline 3576 void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp, 3577 unsigned long caller) 3578 { 3579 unsigned long flags; 3580 3581 local_irq_save(flags); 3582 debug_check_no_locks_freed(objp, cachep->object_size); 3583 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3584 debug_check_no_obj_freed(objp, cachep->object_size); 3585 __cache_free(cachep, objp, caller); 3586 local_irq_restore(flags); 3587 } 3588 3589 void __kmem_cache_free(struct kmem_cache *cachep, void *objp, 3590 unsigned long caller) 3591 { 3592 __do_kmem_cache_free(cachep, objp, caller); 3593 } 3594 3595 /** 3596 * kmem_cache_free - Deallocate an object 3597 * @cachep: The cache the allocation was from. 3598 * @objp: The previously allocated object. 3599 * 3600 * Free an object which was previously allocated from this 3601 * cache. 3602 */ 3603 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3604 { 3605 cachep = cache_from_obj(cachep, objp); 3606 if (!cachep) 3607 return; 3608 3609 trace_kmem_cache_free(_RET_IP_, objp, cachep); 3610 __do_kmem_cache_free(cachep, objp, _RET_IP_); 3611 } 3612 EXPORT_SYMBOL(kmem_cache_free); 3613 3614 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) 3615 { 3616 3617 local_irq_disable(); 3618 for (int i = 0; i < size; i++) { 3619 void *objp = p[i]; 3620 struct kmem_cache *s; 3621 3622 if (!orig_s) { 3623 struct folio *folio = virt_to_folio(objp); 3624 3625 /* called via kfree_bulk */ 3626 if (!folio_test_slab(folio)) { 3627 local_irq_enable(); 3628 free_large_kmalloc(folio, objp); 3629 local_irq_disable(); 3630 continue; 3631 } 3632 s = folio_slab(folio)->slab_cache; 3633 } else { 3634 s = cache_from_obj(orig_s, objp); 3635 } 3636 3637 if (!s) 3638 continue; 3639 3640 debug_check_no_locks_freed(objp, s->object_size); 3641 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 3642 debug_check_no_obj_freed(objp, s->object_size); 3643 3644 __cache_free(s, objp, _RET_IP_); 3645 } 3646 local_irq_enable(); 3647 3648 /* FIXME: add tracing */ 3649 } 3650 EXPORT_SYMBOL(kmem_cache_free_bulk); 3651 3652 /* 3653 * This initializes kmem_cache_node or resizes various caches for all nodes. 
3654 */ 3655 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) 3656 { 3657 int ret; 3658 int node; 3659 struct kmem_cache_node *n; 3660 3661 for_each_online_node(node) { 3662 ret = setup_kmem_cache_node(cachep, node, gfp, true); 3663 if (ret) 3664 goto fail; 3665 3666 } 3667 3668 return 0; 3669 3670 fail: 3671 if (!cachep->list.next) { 3672 /* Cache is not active yet. Roll back what we did */ 3673 node--; 3674 while (node >= 0) { 3675 n = get_node(cachep, node); 3676 if (n) { 3677 kfree(n->shared); 3678 free_alien_cache(n->alien); 3679 kfree(n); 3680 cachep->node[node] = NULL; 3681 } 3682 node--; 3683 } 3684 } 3685 return -ENOMEM; 3686 } 3687 3688 /* Always called with the slab_mutex held */ 3689 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3690 int batchcount, int shared, gfp_t gfp) 3691 { 3692 struct array_cache __percpu *cpu_cache, *prev; 3693 int cpu; 3694 3695 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); 3696 if (!cpu_cache) 3697 return -ENOMEM; 3698 3699 prev = cachep->cpu_cache; 3700 cachep->cpu_cache = cpu_cache; 3701 /* 3702 * Without a previous cpu_cache there's no need to synchronize remote 3703 * cpus, so skip the IPIs. 3704 */ 3705 if (prev) 3706 kick_all_cpus_sync(); 3707 3708 check_irq_on(); 3709 cachep->batchcount = batchcount; 3710 cachep->limit = limit; 3711 cachep->shared = shared; 3712 3713 if (!prev) 3714 goto setup_node; 3715 3716 for_each_online_cpu(cpu) { 3717 LIST_HEAD(list); 3718 int node; 3719 struct kmem_cache_node *n; 3720 struct array_cache *ac = per_cpu_ptr(prev, cpu); 3721 3722 node = cpu_to_mem(cpu); 3723 n = get_node(cachep, node); 3724 spin_lock_irq(&n->list_lock); 3725 free_block(cachep, ac->entry, ac->avail, node, &list); 3726 spin_unlock_irq(&n->list_lock); 3727 slabs_destroy(cachep, &list); 3728 } 3729 free_percpu(prev); 3730 3731 setup_node: 3732 return setup_kmem_cache_nodes(cachep, gfp); 3733 } 3734 3735 /* Called with slab_mutex held always */ 3736 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3737 { 3738 int err; 3739 int limit = 0; 3740 int shared = 0; 3741 int batchcount = 0; 3742 3743 err = cache_random_seq_create(cachep, cachep->num, gfp); 3744 if (err) 3745 goto end; 3746 3747 /* 3748 * The head array serves three purposes: 3749 * - create a LIFO ordering, i.e. return objects that are cache-warm 3750 * - reduce the number of spinlock operations. 3751 * - reduce the number of linked list operations on the slab and 3752 * bufctl chains: array operations are cheaper. 3753 * The numbers are guessed, we should auto-tune as described by 3754 * Bonwick. 3755 */ 3756 if (cachep->size > 131072) 3757 limit = 1; 3758 else if (cachep->size > PAGE_SIZE) 3759 limit = 8; 3760 else if (cachep->size > 1024) 3761 limit = 24; 3762 else if (cachep->size > 256) 3763 limit = 54; 3764 else 3765 limit = 120; 3766 3767 /* 3768 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3769 * allocation behaviour: Most allocs on one cpu, most free operations 3770 * on another cpu. For these cases, an efficient object passing between 3771 * cpus is necessary. This is provided by a shared array. The array 3772 * replaces Bonwick's magazine layer. 3773 * On uniprocessor, it's functionally equivalent (but less efficient) 3774 * to a larger limit. Thus disabled by default. 
3775 */ 3776 shared = 0; 3777 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) 3778 shared = 8; 3779 3780 #if DEBUG 3781 /* 3782 * With debugging enabled, large batchcount lead to excessively long 3783 * periods with disabled local interrupts. Limit the batchcount 3784 */ 3785 if (limit > 32) 3786 limit = 32; 3787 #endif 3788 batchcount = (limit + 1) / 2; 3789 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3790 end: 3791 if (err) 3792 pr_err("enable_cpucache failed for %s, error %d\n", 3793 cachep->name, -err); 3794 return err; 3795 } 3796 3797 /* 3798 * Drain an array if it contains any elements taking the node lock only if 3799 * necessary. Note that the node listlock also protects the array_cache 3800 * if drain_array() is used on the shared array. 3801 */ 3802 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 3803 struct array_cache *ac, int node) 3804 { 3805 LIST_HEAD(list); 3806 3807 /* ac from n->shared can be freed if we don't hold the slab_mutex. */ 3808 check_mutex_acquired(); 3809 3810 if (!ac || !ac->avail) 3811 return; 3812 3813 if (ac->touched) { 3814 ac->touched = 0; 3815 return; 3816 } 3817 3818 spin_lock_irq(&n->list_lock); 3819 drain_array_locked(cachep, ac, node, false, &list); 3820 spin_unlock_irq(&n->list_lock); 3821 3822 slabs_destroy(cachep, &list); 3823 } 3824 3825 /** 3826 * cache_reap - Reclaim memory from caches. 3827 * @w: work descriptor 3828 * 3829 * Called from workqueue/eventd every few seconds. 3830 * Purpose: 3831 * - clear the per-cpu caches for this CPU. 3832 * - return freeable pages to the main free memory pool. 3833 * 3834 * If we cannot acquire the cache chain mutex then just give up - we'll try 3835 * again on the next iteration. 3836 */ 3837 static void cache_reap(struct work_struct *w) 3838 { 3839 struct kmem_cache *searchp; 3840 struct kmem_cache_node *n; 3841 int node = numa_mem_id(); 3842 struct delayed_work *work = to_delayed_work(w); 3843 3844 if (!mutex_trylock(&slab_mutex)) 3845 /* Give up. Setup the next iteration. */ 3846 goto out; 3847 3848 list_for_each_entry(searchp, &slab_caches, list) { 3849 check_irq_on(); 3850 3851 /* 3852 * We only take the node lock if absolutely necessary and we 3853 * have established with reasonable certainty that 3854 * we can do some work if the lock was obtained. 3855 */ 3856 n = get_node(searchp, node); 3857 3858 reap_alien(searchp, n); 3859 3860 drain_array(searchp, n, cpu_cache_get(searchp), node); 3861 3862 /* 3863 * These are racy checks but it does not matter 3864 * if we skip one check or scan twice. 
3865 */ 3866 if (time_after(n->next_reap, jiffies)) 3867 goto next; 3868 3869 n->next_reap = jiffies + REAPTIMEOUT_NODE; 3870 3871 drain_array(searchp, n, n->shared, node); 3872 3873 if (n->free_touched) 3874 n->free_touched = 0; 3875 else { 3876 int freed; 3877 3878 freed = drain_freelist(searchp, n, (n->free_limit + 3879 5 * searchp->num - 1) / (5 * searchp->num)); 3880 STATS_ADD_REAPED(searchp, freed); 3881 } 3882 next: 3883 cond_resched(); 3884 } 3885 check_irq_on(); 3886 mutex_unlock(&slab_mutex); 3887 next_reap_node(); 3888 out: 3889 /* Set up the next iteration */ 3890 schedule_delayed_work_on(smp_processor_id(), work, 3891 round_jiffies_relative(REAPTIMEOUT_AC)); 3892 } 3893 3894 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) 3895 { 3896 unsigned long active_objs, num_objs, active_slabs; 3897 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0; 3898 unsigned long free_slabs = 0; 3899 int node; 3900 struct kmem_cache_node *n; 3901 3902 for_each_kmem_cache_node(cachep, node, n) { 3903 check_irq_on(); 3904 spin_lock_irq(&n->list_lock); 3905 3906 total_slabs += n->total_slabs; 3907 free_slabs += n->free_slabs; 3908 free_objs += n->free_objects; 3909 3910 if (n->shared) 3911 shared_avail += n->shared->avail; 3912 3913 spin_unlock_irq(&n->list_lock); 3914 } 3915 num_objs = total_slabs * cachep->num; 3916 active_slabs = total_slabs - free_slabs; 3917 active_objs = num_objs - free_objs; 3918 3919 sinfo->active_objs = active_objs; 3920 sinfo->num_objs = num_objs; 3921 sinfo->active_slabs = active_slabs; 3922 sinfo->num_slabs = total_slabs; 3923 sinfo->shared_avail = shared_avail; 3924 sinfo->limit = cachep->limit; 3925 sinfo->batchcount = cachep->batchcount; 3926 sinfo->shared = cachep->shared; 3927 sinfo->objects_per_slab = cachep->num; 3928 sinfo->cache_order = cachep->gfporder; 3929 } 3930 3931 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) 3932 { 3933 #if STATS 3934 { /* node stats */ 3935 unsigned long high = cachep->high_mark; 3936 unsigned long allocs = cachep->num_allocations; 3937 unsigned long grown = cachep->grown; 3938 unsigned long reaped = cachep->reaped; 3939 unsigned long errors = cachep->errors; 3940 unsigned long max_freeable = cachep->max_freeable; 3941 unsigned long node_allocs = cachep->node_allocs; 3942 unsigned long node_frees = cachep->node_frees; 3943 unsigned long overflows = cachep->node_overflow; 3944 3945 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu", 3946 allocs, high, grown, 3947 reaped, errors, max_freeable, node_allocs, 3948 node_frees, overflows); 3949 } 3950 /* cpu stats */ 3951 { 3952 unsigned long allochit = atomic_read(&cachep->allochit); 3953 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 3954 unsigned long freehit = atomic_read(&cachep->freehit); 3955 unsigned long freemiss = atomic_read(&cachep->freemiss); 3956 3957 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 3958 allochit, allocmiss, freehit, freemiss); 3959 } 3960 #endif 3961 } 3962 3963 #define MAX_SLABINFO_WRITE 128 3964 /** 3965 * slabinfo_write - Tuning for the slab allocator 3966 * @file: unused 3967 * @buffer: user buffer 3968 * @count: data length 3969 * @ppos: unused 3970 * 3971 * Return: %0 on success, negative error code otherwise. 
3972 */ 3973 ssize_t slabinfo_write(struct file *file, const char __user *buffer, 3974 size_t count, loff_t *ppos) 3975 { 3976 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; 3977 int limit, batchcount, shared, res; 3978 struct kmem_cache *cachep; 3979 3980 if (count > MAX_SLABINFO_WRITE) 3981 return -EINVAL; 3982 if (copy_from_user(&kbuf, buffer, count)) 3983 return -EFAULT; 3984 kbuf[MAX_SLABINFO_WRITE] = '\0'; 3985 3986 tmp = strchr(kbuf, ' '); 3987 if (!tmp) 3988 return -EINVAL; 3989 *tmp = '\0'; 3990 tmp++; 3991 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) 3992 return -EINVAL; 3993 3994 /* Find the cache in the chain of caches. */ 3995 mutex_lock(&slab_mutex); 3996 res = -EINVAL; 3997 list_for_each_entry(cachep, &slab_caches, list) { 3998 if (!strcmp(cachep->name, kbuf)) { 3999 if (limit < 1 || batchcount < 1 || 4000 batchcount > limit || shared < 0) { 4001 res = 0; 4002 } else { 4003 res = do_tune_cpucache(cachep, limit, 4004 batchcount, shared, 4005 GFP_KERNEL); 4006 } 4007 break; 4008 } 4009 } 4010 mutex_unlock(&slab_mutex); 4011 if (res >= 0) 4012 res = count; 4013 return res; 4014 } 4015 4016 #ifdef CONFIG_HARDENED_USERCOPY 4017 /* 4018 * Rejects incorrectly sized objects and objects that are to be copied 4019 * to/from userspace but do not fall entirely within the containing slab 4020 * cache's usercopy region. 4021 * 4022 * Returns NULL if check passes, otherwise const char * to name of cache 4023 * to indicate an error. 4024 */ 4025 void __check_heap_object(const void *ptr, unsigned long n, 4026 const struct slab *slab, bool to_user) 4027 { 4028 struct kmem_cache *cachep; 4029 unsigned int objnr; 4030 unsigned long offset; 4031 4032 ptr = kasan_reset_tag(ptr); 4033 4034 /* Find and validate object. */ 4035 cachep = slab->slab_cache; 4036 objnr = obj_to_index(cachep, slab, (void *)ptr); 4037 BUG_ON(objnr >= cachep->num); 4038 4039 /* Find offset within object. */ 4040 if (is_kfence_address(ptr)) 4041 offset = ptr - kfence_object_start(ptr); 4042 else 4043 offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep); 4044 4045 /* Allow address range falling entirely within usercopy region. */ 4046 if (offset >= cachep->useroffset && 4047 offset - cachep->useroffset <= cachep->usersize && 4048 n <= cachep->useroffset - offset + cachep->usersize) 4049 return; 4050 4051 usercopy_abort("SLAB object", cachep->name, to_user, offset, n); 4052 } 4053 #endif /* CONFIG_HARDENED_USERCOPY */ 4054
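/*
 * Illustrative sketch (not part of the original file): how a cache owner
 * would typically declare the usercopy window that __check_heap_object()
 * above validates. "struct foo", its field layout and foo_cache_init()
 * are assumptions invented for this example; kmem_cache_create_usercopy()
 * is the real API.
 */
struct foo {
	unsigned long private_state;	/* never copied to/from userspace */
	char payload[64];		/* only this window may be copied to/from userspace */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
			SLAB_HWCACHE_ALIGN,
			offsetof(struct foo, payload),
			sizeof_field(struct foo, payload),
			NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

/*
 * The limit/batchcount/shared tunables parsed by slabinfo_write() above are
 * driven from userspace by writing the cache name followed by three integers,
 * e.g. (the values here are only an example):
 *
 *	echo "foo 120 60 8" > /proc/slabinfo
 */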