// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise they come from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
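 * There are two sets of MAX_NUMNODES entries: the [CACHE_CACHE + node]
 * entries back the statically allocated kmem_cache itself, and the
 * [SIZE_NODE + node] entries back the kmalloc cache that later provides
 * the struct kmem_cache_node objects (see set_up_node() and init_list()
 * below).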
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)		((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps mean a lower probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
266 */ 267 #define REAPTIMEOUT_AC (2*HZ) 268 #define REAPTIMEOUT_NODE (4*HZ) 269 270 #if STATS 271 #define STATS_INC_ACTIVE(x) ((x)->num_active++) 272 #define STATS_DEC_ACTIVE(x) ((x)->num_active--) 273 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 274 #define STATS_INC_GROWN(x) ((x)->grown++) 275 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 276 #define STATS_SET_HIGH(x) \ 277 do { \ 278 if ((x)->num_active > (x)->high_mark) \ 279 (x)->high_mark = (x)->num_active; \ 280 } while (0) 281 #define STATS_INC_ERR(x) ((x)->errors++) 282 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 283 #define STATS_INC_NODEFREES(x) ((x)->node_frees++) 284 #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 285 #define STATS_SET_FREEABLE(x, i) \ 286 do { \ 287 if ((x)->max_freeable < i) \ 288 (x)->max_freeable = i; \ 289 } while (0) 290 #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 291 #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 292 #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 293 #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 294 #else 295 #define STATS_INC_ACTIVE(x) do { } while (0) 296 #define STATS_DEC_ACTIVE(x) do { } while (0) 297 #define STATS_INC_ALLOCED(x) do { } while (0) 298 #define STATS_INC_GROWN(x) do { } while (0) 299 #define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0) 300 #define STATS_SET_HIGH(x) do { } while (0) 301 #define STATS_INC_ERR(x) do { } while (0) 302 #define STATS_INC_NODEALLOCS(x) do { } while (0) 303 #define STATS_INC_NODEFREES(x) do { } while (0) 304 #define STATS_INC_ACOVERFLOW(x) do { } while (0) 305 #define STATS_SET_FREEABLE(x, i) do { } while (0) 306 #define STATS_INC_ALLOCHIT(x) do { } while (0) 307 #define STATS_INC_ALLOCMISS(x) do { } while (0) 308 #define STATS_INC_FREEHIT(x) do { } while (0) 309 #define STATS_INC_FREEMISS(x) do { } while (0) 310 #endif 311 312 #if DEBUG 313 314 /* 315 * memory layout of objects: 316 * 0 : objp 317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 318 * the end of an object is aligned with the end of the real 319 * allocation. Catches writes behind the end of the allocation. 320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 321 * redzone word. 322 * cachep->obj_offset: The real object. 
323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 324 * cachep->size - 1* BYTES_PER_WORD: last caller address 325 * [BYTES_PER_WORD long] 326 */ 327 static int obj_offset(struct kmem_cache *cachep) 328 { 329 return cachep->obj_offset; 330 } 331 332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 333 { 334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 335 return (unsigned long long*) (objp + obj_offset(cachep) - 336 sizeof(unsigned long long)); 337 } 338 339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 340 { 341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 342 if (cachep->flags & SLAB_STORE_USER) 343 return (unsigned long long *)(objp + cachep->size - 344 sizeof(unsigned long long) - 345 REDZONE_ALIGN); 346 return (unsigned long long *) (objp + cachep->size - 347 sizeof(unsigned long long)); 348 } 349 350 static void **dbg_userword(struct kmem_cache *cachep, void *objp) 351 { 352 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 353 return (void **)(objp + cachep->size - BYTES_PER_WORD); 354 } 355 356 #else 357 358 #define obj_offset(x) 0 359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 362 363 #endif 364 365 #ifdef CONFIG_DEBUG_SLAB_LEAK 366 367 static inline bool is_store_user_clean(struct kmem_cache *cachep) 368 { 369 return atomic_read(&cachep->store_user_clean) == 1; 370 } 371 372 static inline void set_store_user_clean(struct kmem_cache *cachep) 373 { 374 atomic_set(&cachep->store_user_clean, 1); 375 } 376 377 static inline void set_store_user_dirty(struct kmem_cache *cachep) 378 { 379 if (is_store_user_clean(cachep)) 380 atomic_set(&cachep->store_user_clean, 0); 381 } 382 383 #else 384 static inline void set_store_user_dirty(struct kmem_cache *cachep) {} 385 386 #endif 387 388 /* 389 * Do not go above this order unless 0 objects fit into the slab or 390 * overridden on the command line. 391 */ 392 #define SLAB_MAX_ORDER_HI 1 393 #define SLAB_MAX_ORDER_LO 0 394 static int slab_max_order = SLAB_MAX_ORDER_LO; 395 static bool slab_max_order_set __initdata; 396 397 static inline struct kmem_cache *virt_to_cache(const void *obj) 398 { 399 struct page *page = virt_to_head_page(obj); 400 return page->slab_cache; 401 } 402 403 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, 404 unsigned int idx) 405 { 406 return page->s_mem + cache->size * idx; 407 } 408 409 #define BOOT_CPUCACHE_ENTRIES 1 410 /* internal cache of cache description objs */ 411 static struct kmem_cache kmem_cache_boot = { 412 .batchcount = 1, 413 .limit = BOOT_CPUCACHE_ENTRIES, 414 .shared = 1, 415 .size = sizeof(struct kmem_cache), 416 .name = "kmem_cache", 417 }; 418 419 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); 420 421 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 422 { 423 return this_cpu_ptr(cachep->cpu_cache); 424 } 425 426 /* 427 * Calculate the number of objects and left-over bytes for a given buffer size. 428 */ 429 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, 430 slab_flags_t flags, size_t *left_over) 431 { 432 unsigned int num; 433 size_t slab_size = PAGE_SIZE << gfporder; 434 435 /* 436 * The slab management structure can be either off the slab or 437 * on it. 
For the latter case, the memory allocated for a 438 * slab is used for: 439 * 440 * - @buffer_size bytes for each object 441 * - One freelist_idx_t for each object 442 * 443 * We don't need to consider alignment of freelist because 444 * freelist will be at the end of slab page. The objects will be 445 * at the correct alignment. 446 * 447 * If the slab management structure is off the slab, then the 448 * alignment will already be calculated into the size. Because 449 * the slabs are all pages aligned, the objects will be at the 450 * correct alignment when allocated. 451 */ 452 if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) { 453 num = slab_size / buffer_size; 454 *left_over = slab_size % buffer_size; 455 } else { 456 num = slab_size / (buffer_size + sizeof(freelist_idx_t)); 457 *left_over = slab_size % 458 (buffer_size + sizeof(freelist_idx_t)); 459 } 460 461 return num; 462 } 463 464 #if DEBUG 465 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) 466 467 static void __slab_error(const char *function, struct kmem_cache *cachep, 468 char *msg) 469 { 470 pr_err("slab error in %s(): cache `%s': %s\n", 471 function, cachep->name, msg); 472 dump_stack(); 473 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 474 } 475 #endif 476 477 /* 478 * By default on NUMA we use alien caches to stage the freeing of 479 * objects allocated from other nodes. This causes massive memory 480 * inefficiencies when using fake NUMA setup to split memory into a 481 * large number of small nodes, so it can be disabled on the command 482 * line 483 */ 484 485 static int use_alien_caches __read_mostly = 1; 486 static int __init noaliencache_setup(char *s) 487 { 488 use_alien_caches = 0; 489 return 1; 490 } 491 __setup("noaliencache", noaliencache_setup); 492 493 static int __init slab_max_order_setup(char *str) 494 { 495 get_option(&str, &slab_max_order); 496 slab_max_order = slab_max_order < 0 ? 0 : 497 min(slab_max_order, MAX_ORDER - 1); 498 slab_max_order_set = true; 499 500 return 1; 501 } 502 __setup("slab_max_order=", slab_max_order_setup); 503 504 #ifdef CONFIG_NUMA 505 /* 506 * Special reaping functions for NUMA systems called from cache_reap(). 507 * These take care of doing round robin flushing of alien caches (containing 508 * objects freed on different nodes from which they were allocated) and the 509 * flushing of remote pcps by calling drain_node_pages. 510 */ 511 static DEFINE_PER_CPU(unsigned long, slab_reap_node); 512 513 static void init_reap_node(int cpu) 514 { 515 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu), 516 node_online_map); 517 } 518 519 static void next_reap_node(void) 520 { 521 int node = __this_cpu_read(slab_reap_node); 522 523 node = next_node_in(node, node_online_map); 524 __this_cpu_write(slab_reap_node, node); 525 } 526 527 #else 528 #define init_reap_node(cpu) do { } while (0) 529 #define next_reap_node(void) do { } while (0) 530 #endif 531 532 /* 533 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 534 * via the workqueue/eventd. 535 * Add the CPU number into the expiration time to minimize the possibility of 536 * the CPUs getting into lockstep and contending for the global cache chain 537 * lock. 
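 * (start_cpu_timer() below achieves this by passing the CPU number to
 * __round_jiffies_relative(), which skews the rounded expiry per CPU.)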
538 */ 539 static void start_cpu_timer(int cpu) 540 { 541 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); 542 543 if (reap_work->work.func == NULL) { 544 init_reap_node(cpu); 545 INIT_DEFERRABLE_WORK(reap_work, cache_reap); 546 schedule_delayed_work_on(cpu, reap_work, 547 __round_jiffies_relative(HZ, cpu)); 548 } 549 } 550 551 static void init_arraycache(struct array_cache *ac, int limit, int batch) 552 { 553 /* 554 * The array_cache structures contain pointers to free object. 555 * However, when such objects are allocated or transferred to another 556 * cache the pointers are not cleared and they could be counted as 557 * valid references during a kmemleak scan. Therefore, kmemleak must 558 * not scan such objects. 559 */ 560 kmemleak_no_scan(ac); 561 if (ac) { 562 ac->avail = 0; 563 ac->limit = limit; 564 ac->batchcount = batch; 565 ac->touched = 0; 566 } 567 } 568 569 static struct array_cache *alloc_arraycache(int node, int entries, 570 int batchcount, gfp_t gfp) 571 { 572 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); 573 struct array_cache *ac = NULL; 574 575 ac = kmalloc_node(memsize, gfp, node); 576 init_arraycache(ac, entries, batchcount); 577 return ac; 578 } 579 580 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, 581 struct page *page, void *objp) 582 { 583 struct kmem_cache_node *n; 584 int page_node; 585 LIST_HEAD(list); 586 587 page_node = page_to_nid(page); 588 n = get_node(cachep, page_node); 589 590 spin_lock(&n->list_lock); 591 free_block(cachep, &objp, 1, page_node, &list); 592 spin_unlock(&n->list_lock); 593 594 slabs_destroy(cachep, &list); 595 } 596 597 /* 598 * Transfer objects in one arraycache to another. 599 * Locking must be handled by the caller. 600 * 601 * Return the number of entries transferred. 
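 *
 * Example, with illustrative values: if "from" holds 10 pointers, "max" is 20
 * and "to" has room for 4 more entries (limit - avail == 4), then nr == 4 and
 * the four most recently freed pointers, from->entry[6..9], are copied to the
 * top of "to" at to->entry[to->avail].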
602 */ 603 static int transfer_objects(struct array_cache *to, 604 struct array_cache *from, unsigned int max) 605 { 606 /* Figure out how many entries to transfer */ 607 int nr = min3(from->avail, max, to->limit - to->avail); 608 609 if (!nr) 610 return 0; 611 612 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 613 sizeof(void *) *nr); 614 615 from->avail -= nr; 616 to->avail += nr; 617 return nr; 618 } 619 620 #ifndef CONFIG_NUMA 621 622 #define drain_alien_cache(cachep, alien) do { } while (0) 623 #define reap_alien(cachep, n) do { } while (0) 624 625 static inline struct alien_cache **alloc_alien_cache(int node, 626 int limit, gfp_t gfp) 627 { 628 return NULL; 629 } 630 631 static inline void free_alien_cache(struct alien_cache **ac_ptr) 632 { 633 } 634 635 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 636 { 637 return 0; 638 } 639 640 static inline void *alternate_node_alloc(struct kmem_cache *cachep, 641 gfp_t flags) 642 { 643 return NULL; 644 } 645 646 static inline void *____cache_alloc_node(struct kmem_cache *cachep, 647 gfp_t flags, int nodeid) 648 { 649 return NULL; 650 } 651 652 static inline gfp_t gfp_exact_node(gfp_t flags) 653 { 654 return flags & ~__GFP_NOFAIL; 655 } 656 657 #else /* CONFIG_NUMA */ 658 659 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 660 static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 661 662 static struct alien_cache *__alloc_alien_cache(int node, int entries, 663 int batch, gfp_t gfp) 664 { 665 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); 666 struct alien_cache *alc = NULL; 667 668 alc = kmalloc_node(memsize, gfp, node); 669 init_arraycache(&alc->ac, entries, batch); 670 spin_lock_init(&alc->lock); 671 return alc; 672 } 673 674 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) 675 { 676 struct alien_cache **alc_ptr; 677 size_t memsize = sizeof(void *) * nr_node_ids; 678 int i; 679 680 if (limit > 1) 681 limit = 12; 682 alc_ptr = kzalloc_node(memsize, gfp, node); 683 if (!alc_ptr) 684 return NULL; 685 686 for_each_node(i) { 687 if (i == node || !node_online(i)) 688 continue; 689 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); 690 if (!alc_ptr[i]) { 691 for (i--; i >= 0; i--) 692 kfree(alc_ptr[i]); 693 kfree(alc_ptr); 694 return NULL; 695 } 696 } 697 return alc_ptr; 698 } 699 700 static void free_alien_cache(struct alien_cache **alc_ptr) 701 { 702 int i; 703 704 if (!alc_ptr) 705 return; 706 for_each_node(i) 707 kfree(alc_ptr[i]); 708 kfree(alc_ptr); 709 } 710 711 static void __drain_alien_cache(struct kmem_cache *cachep, 712 struct array_cache *ac, int node, 713 struct list_head *list) 714 { 715 struct kmem_cache_node *n = get_node(cachep, node); 716 717 if (ac->avail) { 718 spin_lock(&n->list_lock); 719 /* 720 * Stuff objects into the remote nodes shared array first. 721 * That way we could avoid the overhead of putting the objects 722 * into the free lists and getting them back later. 723 */ 724 if (n->shared) 725 transfer_objects(n->shared, ac, ac->limit); 726 727 free_block(cachep, ac->entry, ac->avail, node, list); 728 ac->avail = 0; 729 spin_unlock(&n->list_lock); 730 } 731 } 732 733 /* 734 * Called from cache_reap() to regularly drain alien caches round robin. 
735 */ 736 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) 737 { 738 int node = __this_cpu_read(slab_reap_node); 739 740 if (n->alien) { 741 struct alien_cache *alc = n->alien[node]; 742 struct array_cache *ac; 743 744 if (alc) { 745 ac = &alc->ac; 746 if (ac->avail && spin_trylock_irq(&alc->lock)) { 747 LIST_HEAD(list); 748 749 __drain_alien_cache(cachep, ac, node, &list); 750 spin_unlock_irq(&alc->lock); 751 slabs_destroy(cachep, &list); 752 } 753 } 754 } 755 } 756 757 static void drain_alien_cache(struct kmem_cache *cachep, 758 struct alien_cache **alien) 759 { 760 int i = 0; 761 struct alien_cache *alc; 762 struct array_cache *ac; 763 unsigned long flags; 764 765 for_each_online_node(i) { 766 alc = alien[i]; 767 if (alc) { 768 LIST_HEAD(list); 769 770 ac = &alc->ac; 771 spin_lock_irqsave(&alc->lock, flags); 772 __drain_alien_cache(cachep, ac, i, &list); 773 spin_unlock_irqrestore(&alc->lock, flags); 774 slabs_destroy(cachep, &list); 775 } 776 } 777 } 778 779 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, 780 int node, int page_node) 781 { 782 struct kmem_cache_node *n; 783 struct alien_cache *alien = NULL; 784 struct array_cache *ac; 785 LIST_HEAD(list); 786 787 n = get_node(cachep, node); 788 STATS_INC_NODEFREES(cachep); 789 if (n->alien && n->alien[page_node]) { 790 alien = n->alien[page_node]; 791 ac = &alien->ac; 792 spin_lock(&alien->lock); 793 if (unlikely(ac->avail == ac->limit)) { 794 STATS_INC_ACOVERFLOW(cachep); 795 __drain_alien_cache(cachep, ac, page_node, &list); 796 } 797 ac->entry[ac->avail++] = objp; 798 spin_unlock(&alien->lock); 799 slabs_destroy(cachep, &list); 800 } else { 801 n = get_node(cachep, page_node); 802 spin_lock(&n->list_lock); 803 free_block(cachep, &objp, 1, page_node, &list); 804 spin_unlock(&n->list_lock); 805 slabs_destroy(cachep, &list); 806 } 807 return 1; 808 } 809 810 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 811 { 812 int page_node = page_to_nid(virt_to_page(objp)); 813 int node = numa_mem_id(); 814 /* 815 * Make sure we are not freeing a object from another node to the array 816 * cache on this cpu. 817 */ 818 if (likely(node == page_node)) 819 return 0; 820 821 return __cache_free_alien(cachep, objp, node, page_node); 822 } 823 824 /* 825 * Construct gfp mask to allocate from a specific node but do not reclaim or 826 * warn about failures. 827 */ 828 static inline gfp_t gfp_exact_node(gfp_t flags) 829 { 830 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 831 } 832 #endif 833 834 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) 835 { 836 struct kmem_cache_node *n; 837 838 /* 839 * Set up the kmem_cache_node for cpu before we can 840 * begin anything. Make sure some other cpu on this 841 * node has not already allocated this 842 */ 843 n = get_node(cachep, node); 844 if (n) { 845 spin_lock_irq(&n->list_lock); 846 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + 847 cachep->num; 848 spin_unlock_irq(&n->list_lock); 849 850 return 0; 851 } 852 853 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); 854 if (!n) 855 return -ENOMEM; 856 857 kmem_cache_node_init(n); 858 n->next_reap = jiffies + REAPTIMEOUT_NODE + 859 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 860 861 n->free_limit = 862 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; 863 864 /* 865 * The kmem_cache_nodes don't come and go as CPUs 866 * come and go. slab_mutex is sufficient 867 * protection here. 
868 */ 869 cachep->node[node] = n; 870 871 return 0; 872 } 873 874 #if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP) 875 /* 876 * Allocates and initializes node for a node on each slab cache, used for 877 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node 878 * will be allocated off-node since memory is not yet online for the new node. 879 * When hotplugging memory or a cpu, existing node are not replaced if 880 * already in use. 881 * 882 * Must hold slab_mutex. 883 */ 884 static int init_cache_node_node(int node) 885 { 886 int ret; 887 struct kmem_cache *cachep; 888 889 list_for_each_entry(cachep, &slab_caches, list) { 890 ret = init_cache_node(cachep, node, GFP_KERNEL); 891 if (ret) 892 return ret; 893 } 894 895 return 0; 896 } 897 #endif 898 899 static int setup_kmem_cache_node(struct kmem_cache *cachep, 900 int node, gfp_t gfp, bool force_change) 901 { 902 int ret = -ENOMEM; 903 struct kmem_cache_node *n; 904 struct array_cache *old_shared = NULL; 905 struct array_cache *new_shared = NULL; 906 struct alien_cache **new_alien = NULL; 907 LIST_HEAD(list); 908 909 if (use_alien_caches) { 910 new_alien = alloc_alien_cache(node, cachep->limit, gfp); 911 if (!new_alien) 912 goto fail; 913 } 914 915 if (cachep->shared) { 916 new_shared = alloc_arraycache(node, 917 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); 918 if (!new_shared) 919 goto fail; 920 } 921 922 ret = init_cache_node(cachep, node, gfp); 923 if (ret) 924 goto fail; 925 926 n = get_node(cachep, node); 927 spin_lock_irq(&n->list_lock); 928 if (n->shared && force_change) { 929 free_block(cachep, n->shared->entry, 930 n->shared->avail, node, &list); 931 n->shared->avail = 0; 932 } 933 934 if (!n->shared || force_change) { 935 old_shared = n->shared; 936 n->shared = new_shared; 937 new_shared = NULL; 938 } 939 940 if (!n->alien) { 941 n->alien = new_alien; 942 new_alien = NULL; 943 } 944 945 spin_unlock_irq(&n->list_lock); 946 slabs_destroy(cachep, &list); 947 948 /* 949 * To protect lockless access to n->shared during irq disabled context. 950 * If n->shared isn't NULL in irq disabled context, accessing to it is 951 * guaranteed to be valid until irq is re-enabled, because it will be 952 * freed after synchronize_rcu(). 953 */ 954 if (old_shared && force_change) 955 synchronize_rcu(); 956 957 fail: 958 kfree(old_shared); 959 kfree(new_shared); 960 free_alien_cache(new_alien); 961 962 return ret; 963 } 964 965 #ifdef CONFIG_SMP 966 967 static void cpuup_canceled(long cpu) 968 { 969 struct kmem_cache *cachep; 970 struct kmem_cache_node *n = NULL; 971 int node = cpu_to_mem(cpu); 972 const struct cpumask *mask = cpumask_of_node(node); 973 974 list_for_each_entry(cachep, &slab_caches, list) { 975 struct array_cache *nc; 976 struct array_cache *shared; 977 struct alien_cache **alien; 978 LIST_HEAD(list); 979 980 n = get_node(cachep, node); 981 if (!n) 982 continue; 983 984 spin_lock_irq(&n->list_lock); 985 986 /* Free limit for this kmem_cache_node */ 987 n->free_limit -= cachep->batchcount; 988 989 /* cpu is dead; no one can alloc from it. 
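	 * Any objects still sitting in its per-cpu array_cache are handed
	 * back to the node lists via free_block() below.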
*/ 990 nc = per_cpu_ptr(cachep->cpu_cache, cpu); 991 if (nc) { 992 free_block(cachep, nc->entry, nc->avail, node, &list); 993 nc->avail = 0; 994 } 995 996 if (!cpumask_empty(mask)) { 997 spin_unlock_irq(&n->list_lock); 998 goto free_slab; 999 } 1000 1001 shared = n->shared; 1002 if (shared) { 1003 free_block(cachep, shared->entry, 1004 shared->avail, node, &list); 1005 n->shared = NULL; 1006 } 1007 1008 alien = n->alien; 1009 n->alien = NULL; 1010 1011 spin_unlock_irq(&n->list_lock); 1012 1013 kfree(shared); 1014 if (alien) { 1015 drain_alien_cache(cachep, alien); 1016 free_alien_cache(alien); 1017 } 1018 1019 free_slab: 1020 slabs_destroy(cachep, &list); 1021 } 1022 /* 1023 * In the previous loop, all the objects were freed to 1024 * the respective cache's slabs, now we can go ahead and 1025 * shrink each nodelist to its limit. 1026 */ 1027 list_for_each_entry(cachep, &slab_caches, list) { 1028 n = get_node(cachep, node); 1029 if (!n) 1030 continue; 1031 drain_freelist(cachep, n, INT_MAX); 1032 } 1033 } 1034 1035 static int cpuup_prepare(long cpu) 1036 { 1037 struct kmem_cache *cachep; 1038 int node = cpu_to_mem(cpu); 1039 int err; 1040 1041 /* 1042 * We need to do this right in the beginning since 1043 * alloc_arraycache's are going to use this list. 1044 * kmalloc_node allows us to add the slab to the right 1045 * kmem_cache_node and not this cpu's kmem_cache_node 1046 */ 1047 err = init_cache_node_node(node); 1048 if (err < 0) 1049 goto bad; 1050 1051 /* 1052 * Now we can go ahead with allocating the shared arrays and 1053 * array caches 1054 */ 1055 list_for_each_entry(cachep, &slab_caches, list) { 1056 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); 1057 if (err) 1058 goto bad; 1059 } 1060 1061 return 0; 1062 bad: 1063 cpuup_canceled(cpu); 1064 return -ENOMEM; 1065 } 1066 1067 int slab_prepare_cpu(unsigned int cpu) 1068 { 1069 int err; 1070 1071 mutex_lock(&slab_mutex); 1072 err = cpuup_prepare(cpu); 1073 mutex_unlock(&slab_mutex); 1074 return err; 1075 } 1076 1077 /* 1078 * This is called for a failed online attempt and for a successful 1079 * offline. 1080 * 1081 * Even if all the cpus of a node are down, we don't free the 1082 * kmem_list3 of any cache. This to avoid a race between cpu_down, and 1083 * a kmalloc allocation from another cpu for memory from the node of 1084 * the cpu going down. The list3 structure is usually allocated from 1085 * kmem_cache_create() and gets destroyed at kmem_cache_destroy(). 1086 */ 1087 int slab_dead_cpu(unsigned int cpu) 1088 { 1089 mutex_lock(&slab_mutex); 1090 cpuup_canceled(cpu); 1091 mutex_unlock(&slab_mutex); 1092 return 0; 1093 } 1094 #endif 1095 1096 static int slab_online_cpu(unsigned int cpu) 1097 { 1098 start_cpu_timer(cpu); 1099 return 0; 1100 } 1101 1102 static int slab_offline_cpu(unsigned int cpu) 1103 { 1104 /* 1105 * Shutdown cache reaper. Note that the slab_mutex is held so 1106 * that if cache_reap() is invoked it cannot do anything 1107 * expensive but will only modify reap_work and reschedule the 1108 * timer. 1109 */ 1110 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); 1111 /* Now the cache_reaper is guaranteed to be not running. */ 1112 per_cpu(slab_reap_work, cpu).work.func = NULL; 1113 return 0; 1114 } 1115 1116 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 1117 /* 1118 * Drains freelist for a node on each slab cache, used for memory hot-remove. 1119 * Returns -EBUSY if all objects cannot be drained so that the node is not 1120 * removed. 1121 * 1122 * Must hold slab_mutex. 
1123 */ 1124 static int __meminit drain_cache_node_node(int node) 1125 { 1126 struct kmem_cache *cachep; 1127 int ret = 0; 1128 1129 list_for_each_entry(cachep, &slab_caches, list) { 1130 struct kmem_cache_node *n; 1131 1132 n = get_node(cachep, node); 1133 if (!n) 1134 continue; 1135 1136 drain_freelist(cachep, n, INT_MAX); 1137 1138 if (!list_empty(&n->slabs_full) || 1139 !list_empty(&n->slabs_partial)) { 1140 ret = -EBUSY; 1141 break; 1142 } 1143 } 1144 return ret; 1145 } 1146 1147 static int __meminit slab_memory_callback(struct notifier_block *self, 1148 unsigned long action, void *arg) 1149 { 1150 struct memory_notify *mnb = arg; 1151 int ret = 0; 1152 int nid; 1153 1154 nid = mnb->status_change_nid; 1155 if (nid < 0) 1156 goto out; 1157 1158 switch (action) { 1159 case MEM_GOING_ONLINE: 1160 mutex_lock(&slab_mutex); 1161 ret = init_cache_node_node(nid); 1162 mutex_unlock(&slab_mutex); 1163 break; 1164 case MEM_GOING_OFFLINE: 1165 mutex_lock(&slab_mutex); 1166 ret = drain_cache_node_node(nid); 1167 mutex_unlock(&slab_mutex); 1168 break; 1169 case MEM_ONLINE: 1170 case MEM_OFFLINE: 1171 case MEM_CANCEL_ONLINE: 1172 case MEM_CANCEL_OFFLINE: 1173 break; 1174 } 1175 out: 1176 return notifier_from_errno(ret); 1177 } 1178 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ 1179 1180 /* 1181 * swap the static kmem_cache_node with kmalloced memory 1182 */ 1183 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, 1184 int nodeid) 1185 { 1186 struct kmem_cache_node *ptr; 1187 1188 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); 1189 BUG_ON(!ptr); 1190 1191 memcpy(ptr, list, sizeof(struct kmem_cache_node)); 1192 /* 1193 * Do not assume that spinlocks can be initialized via memcpy: 1194 */ 1195 spin_lock_init(&ptr->list_lock); 1196 1197 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1198 cachep->node[nodeid] = ptr; 1199 } 1200 1201 /* 1202 * For setting up all the kmem_cache_node for cache whose buffer_size is same as 1203 * size of kmem_cache_node. 1204 */ 1205 static void __init set_up_node(struct kmem_cache *cachep, int index) 1206 { 1207 int node; 1208 1209 for_each_online_node(node) { 1210 cachep->node[node] = &init_kmem_cache_node[index + node]; 1211 cachep->node[node]->next_reap = jiffies + 1212 REAPTIMEOUT_NODE + 1213 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1214 } 1215 } 1216 1217 /* 1218 * Initialisation. Called after the page allocator have been initialised and 1219 * before smp_init(). 1220 */ 1221 void __init kmem_cache_init(void) 1222 { 1223 int i; 1224 1225 kmem_cache = &kmem_cache_boot; 1226 1227 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1) 1228 use_alien_caches = 0; 1229 1230 for (i = 0; i < NUM_INIT_LISTS; i++) 1231 kmem_cache_node_init(&init_kmem_cache_node[i]); 1232 1233 /* 1234 * Fragmentation resistance on low memory - only use bigger 1235 * page orders on machines with more than 32MB of memory if 1236 * not overridden on the command line. 1237 */ 1238 if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT) 1239 slab_max_order = SLAB_MAX_ORDER_HI; 1240 1241 /* Bootstrap is tricky, because several objects are allocated 1242 * from caches that do not exist yet: 1243 * 1) initialize the kmem_cache cache: it contains the struct 1244 * kmem_cache structures of all caches, except kmem_cache itself: 1245 * kmem_cache is statically allocated. 
1246 * Initially an __init data area is used for the head array and the 1247 * kmem_cache_node structures, it's replaced with a kmalloc allocated 1248 * array at the end of the bootstrap. 1249 * 2) Create the first kmalloc cache. 1250 * The struct kmem_cache for the new cache is allocated normally. 1251 * An __init data area is used for the head array. 1252 * 3) Create the remaining kmalloc caches, with minimally sized 1253 * head arrays. 1254 * 4) Replace the __init data head arrays for kmem_cache and the first 1255 * kmalloc cache with kmalloc allocated arrays. 1256 * 5) Replace the __init data for kmem_cache_node for kmem_cache and 1257 * the other cache's with kmalloc allocated memory. 1258 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1259 */ 1260 1261 /* 1) create the kmem_cache */ 1262 1263 /* 1264 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids 1265 */ 1266 create_boot_cache(kmem_cache, "kmem_cache", 1267 offsetof(struct kmem_cache, node) + 1268 nr_node_ids * sizeof(struct kmem_cache_node *), 1269 SLAB_HWCACHE_ALIGN, 0, 0); 1270 list_add(&kmem_cache->list, &slab_caches); 1271 memcg_link_cache(kmem_cache); 1272 slab_state = PARTIAL; 1273 1274 /* 1275 * Initialize the caches that provide memory for the kmem_cache_node 1276 * structures first. Without this, further allocations will bug. 1277 */ 1278 kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache( 1279 kmalloc_info[INDEX_NODE].name, 1280 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS, 1281 0, kmalloc_size(INDEX_NODE)); 1282 slab_state = PARTIAL_NODE; 1283 setup_kmalloc_cache_index_table(); 1284 1285 slab_early_init = 0; 1286 1287 /* 5) Replace the bootstrap kmem_cache_node */ 1288 { 1289 int nid; 1290 1291 for_each_online_node(nid) { 1292 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); 1293 1294 init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE], 1295 &init_kmem_cache_node[SIZE_NODE + nid], nid); 1296 } 1297 } 1298 1299 create_kmalloc_caches(ARCH_KMALLOC_FLAGS); 1300 } 1301 1302 void __init kmem_cache_init_late(void) 1303 { 1304 struct kmem_cache *cachep; 1305 1306 /* 6) resize the head arrays to their final sizes */ 1307 mutex_lock(&slab_mutex); 1308 list_for_each_entry(cachep, &slab_caches, list) 1309 if (enable_cpucache(cachep, GFP_NOWAIT)) 1310 BUG(); 1311 mutex_unlock(&slab_mutex); 1312 1313 /* Done! */ 1314 slab_state = FULL; 1315 1316 #ifdef CONFIG_NUMA 1317 /* 1318 * Register a memory hotplug callback that initializes and frees 1319 * node. 1320 */ 1321 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 1322 #endif 1323 1324 /* 1325 * The reap timers are started later, with a module init call: That part 1326 * of the kernel is not yet operational. 
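	 * (See cpucache_init() below, registered with __initcall(), which
	 * starts the per-cpu reap work through the CPU hotplug callbacks.)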
1327 */ 1328 } 1329 1330 static int __init cpucache_init(void) 1331 { 1332 int ret; 1333 1334 /* 1335 * Register the timers that return unneeded pages to the page allocator 1336 */ 1337 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online", 1338 slab_online_cpu, slab_offline_cpu); 1339 WARN_ON(ret < 0); 1340 1341 return 0; 1342 } 1343 __initcall(cpucache_init); 1344 1345 static noinline void 1346 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) 1347 { 1348 #if DEBUG 1349 struct kmem_cache_node *n; 1350 unsigned long flags; 1351 int node; 1352 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 1353 DEFAULT_RATELIMIT_BURST); 1354 1355 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) 1356 return; 1357 1358 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 1359 nodeid, gfpflags, &gfpflags); 1360 pr_warn(" cache: %s, object size: %d, order: %d\n", 1361 cachep->name, cachep->size, cachep->gfporder); 1362 1363 for_each_kmem_cache_node(cachep, node, n) { 1364 unsigned long total_slabs, free_slabs, free_objs; 1365 1366 spin_lock_irqsave(&n->list_lock, flags); 1367 total_slabs = n->total_slabs; 1368 free_slabs = n->free_slabs; 1369 free_objs = n->free_objects; 1370 spin_unlock_irqrestore(&n->list_lock, flags); 1371 1372 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", 1373 node, total_slabs - free_slabs, total_slabs, 1374 (total_slabs * cachep->num) - free_objs, 1375 total_slabs * cachep->num); 1376 } 1377 #endif 1378 } 1379 1380 /* 1381 * Interface to system's page allocator. No need to hold the 1382 * kmem_cache_node ->list_lock. 1383 * 1384 * If we requested dmaable memory, we will get it. Even if we 1385 * did not request dmaable memory, we might get it, but that 1386 * would be relatively rare and ignorable. 1387 */ 1388 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, 1389 int nodeid) 1390 { 1391 struct page *page; 1392 int nr_pages; 1393 1394 flags |= cachep->allocflags; 1395 1396 page = __alloc_pages_node(nodeid, flags, cachep->gfporder); 1397 if (!page) { 1398 slab_out_of_memory(cachep, flags, nodeid); 1399 return NULL; 1400 } 1401 1402 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) { 1403 __free_pages(page, cachep->gfporder); 1404 return NULL; 1405 } 1406 1407 nr_pages = (1 << cachep->gfporder); 1408 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1409 mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages); 1410 else 1411 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages); 1412 1413 __SetPageSlab(page); 1414 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1415 if (sk_memalloc_socks() && page_is_pfmemalloc(page)) 1416 SetPageSlabPfmemalloc(page); 1417 1418 return page; 1419 } 1420 1421 /* 1422 * Interface to system's page release. 
1423 */ 1424 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) 1425 { 1426 int order = cachep->gfporder; 1427 unsigned long nr_freed = (1 << order); 1428 1429 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1430 mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed); 1431 else 1432 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed); 1433 1434 BUG_ON(!PageSlab(page)); 1435 __ClearPageSlabPfmemalloc(page); 1436 __ClearPageSlab(page); 1437 page_mapcount_reset(page); 1438 page->mapping = NULL; 1439 1440 if (current->reclaim_state) 1441 current->reclaim_state->reclaimed_slab += nr_freed; 1442 memcg_uncharge_slab(page, order, cachep); 1443 __free_pages(page, order); 1444 } 1445 1446 static void kmem_rcu_free(struct rcu_head *head) 1447 { 1448 struct kmem_cache *cachep; 1449 struct page *page; 1450 1451 page = container_of(head, struct page, rcu_head); 1452 cachep = page->slab_cache; 1453 1454 kmem_freepages(cachep, page); 1455 } 1456 1457 #if DEBUG 1458 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) 1459 { 1460 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && 1461 (cachep->size % PAGE_SIZE) == 0) 1462 return true; 1463 1464 return false; 1465 } 1466 1467 #ifdef CONFIG_DEBUG_PAGEALLOC 1468 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1469 unsigned long caller) 1470 { 1471 int size = cachep->object_size; 1472 1473 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1474 1475 if (size < 5 * sizeof(unsigned long)) 1476 return; 1477 1478 *addr++ = 0x12345678; 1479 *addr++ = caller; 1480 *addr++ = smp_processor_id(); 1481 size -= 3 * sizeof(unsigned long); 1482 { 1483 unsigned long *sptr = &caller; 1484 unsigned long svalue; 1485 1486 while (!kstack_end(sptr)) { 1487 svalue = *sptr++; 1488 if (kernel_text_address(svalue)) { 1489 *addr++ = svalue; 1490 size -= sizeof(unsigned long); 1491 if (size <= sizeof(unsigned long)) 1492 break; 1493 } 1494 } 1495 1496 } 1497 *addr++ = 0x87654321; 1498 } 1499 1500 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, 1501 int map, unsigned long caller) 1502 { 1503 if (!is_debug_pagealloc_cache(cachep)) 1504 return; 1505 1506 if (caller) 1507 store_stackinfo(cachep, objp, caller); 1508 1509 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); 1510 } 1511 1512 #else 1513 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, 1514 int map, unsigned long caller) {} 1515 1516 #endif 1517 1518 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1519 { 1520 int size = cachep->object_size; 1521 addr = &((char *)addr)[obj_offset(cachep)]; 1522 1523 memset(addr, val, size); 1524 *(unsigned char *)(addr + size - 1) = POISON_END; 1525 } 1526 1527 static void dump_line(char *data, int offset, int limit) 1528 { 1529 int i; 1530 unsigned char error = 0; 1531 int bad_count = 0; 1532 1533 pr_err("%03x: ", offset); 1534 for (i = 0; i < limit; i++) { 1535 if (data[offset + i] != POISON_FREE) { 1536 error = data[offset + i]; 1537 bad_count++; 1538 } 1539 } 1540 print_hex_dump(KERN_CONT, "", 0, 16, 1, 1541 &data[offset], limit, 1); 1542 1543 if (bad_count == 1) { 1544 error ^= POISON_FREE; 1545 if (!(error & (error - 1))) { 1546 pr_err("Single bit error detected. 
Probably bad RAM.\n"); 1547 #ifdef CONFIG_X86 1548 pr_err("Run memtest86+ or a similar memory test tool.\n"); 1549 #else 1550 pr_err("Run a memory test tool.\n"); 1551 #endif 1552 } 1553 } 1554 } 1555 #endif 1556 1557 #if DEBUG 1558 1559 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1560 { 1561 int i, size; 1562 char *realobj; 1563 1564 if (cachep->flags & SLAB_RED_ZONE) { 1565 pr_err("Redzone: 0x%llx/0x%llx\n", 1566 *dbg_redzone1(cachep, objp), 1567 *dbg_redzone2(cachep, objp)); 1568 } 1569 1570 if (cachep->flags & SLAB_STORE_USER) 1571 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); 1572 realobj = (char *)objp + obj_offset(cachep); 1573 size = cachep->object_size; 1574 for (i = 0; i < size && lines; i += 16, lines--) { 1575 int limit; 1576 limit = 16; 1577 if (i + limit > size) 1578 limit = size - i; 1579 dump_line(realobj, i, limit); 1580 } 1581 } 1582 1583 static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1584 { 1585 char *realobj; 1586 int size, i; 1587 int lines = 0; 1588 1589 if (is_debug_pagealloc_cache(cachep)) 1590 return; 1591 1592 realobj = (char *)objp + obj_offset(cachep); 1593 size = cachep->object_size; 1594 1595 for (i = 0; i < size; i++) { 1596 char exp = POISON_FREE; 1597 if (i == size - 1) 1598 exp = POISON_END; 1599 if (realobj[i] != exp) { 1600 int limit; 1601 /* Mismatch ! */ 1602 /* Print header */ 1603 if (lines == 0) { 1604 pr_err("Slab corruption (%s): %s start=%px, len=%d\n", 1605 print_tainted(), cachep->name, 1606 realobj, size); 1607 print_objinfo(cachep, objp, 0); 1608 } 1609 /* Hexdump the affected line */ 1610 i = (i / 16) * 16; 1611 limit = 16; 1612 if (i + limit > size) 1613 limit = size - i; 1614 dump_line(realobj, i, limit); 1615 i += 16; 1616 lines++; 1617 /* Limit to 5 lines */ 1618 if (lines > 5) 1619 break; 1620 } 1621 } 1622 if (lines != 0) { 1623 /* Print some data about the neighboring objects, if they 1624 * exist: 1625 */ 1626 struct page *page = virt_to_head_page(objp); 1627 unsigned int objnr; 1628 1629 objnr = obj_to_index(cachep, page, objp); 1630 if (objnr) { 1631 objp = index_to_obj(cachep, page, objnr - 1); 1632 realobj = (char *)objp + obj_offset(cachep); 1633 pr_err("Prev obj: start=%px, len=%d\n", realobj, size); 1634 print_objinfo(cachep, objp, 2); 1635 } 1636 if (objnr + 1 < cachep->num) { 1637 objp = index_to_obj(cachep, page, objnr + 1); 1638 realobj = (char *)objp + obj_offset(cachep); 1639 pr_err("Next obj: start=%px, len=%d\n", realobj, size); 1640 print_objinfo(cachep, objp, 2); 1641 } 1642 } 1643 } 1644 #endif 1645 1646 #if DEBUG 1647 static void slab_destroy_debugcheck(struct kmem_cache *cachep, 1648 struct page *page) 1649 { 1650 int i; 1651 1652 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { 1653 poison_obj(cachep, page->freelist - obj_offset(cachep), 1654 POISON_FREE); 1655 } 1656 1657 for (i = 0; i < cachep->num; i++) { 1658 void *objp = index_to_obj(cachep, page, i); 1659 1660 if (cachep->flags & SLAB_POISON) { 1661 check_poison_obj(cachep, objp); 1662 slab_kernel_map(cachep, objp, 1, 0); 1663 } 1664 if (cachep->flags & SLAB_RED_ZONE) { 1665 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1666 slab_error(cachep, "start of a freed object was overwritten"); 1667 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1668 slab_error(cachep, "end of a freed object was overwritten"); 1669 } 1670 } 1671 } 1672 #else 1673 static void slab_destroy_debugcheck(struct kmem_cache *cachep, 1674 struct page *page) 1675 { 1676 } 1677 #endif 1678 1679 /** 1680 * 
slab_destroy - destroy and release all objects in a slab 1681 * @cachep: cache pointer being destroyed 1682 * @page: page pointer being destroyed 1683 * 1684 * Destroy all the objs in a slab page, and release the mem back to the system. 1685 * Before calling the slab page must have been unlinked from the cache. The 1686 * kmem_cache_node ->list_lock is not held/needed. 1687 */ 1688 static void slab_destroy(struct kmem_cache *cachep, struct page *page) 1689 { 1690 void *freelist; 1691 1692 freelist = page->freelist; 1693 slab_destroy_debugcheck(cachep, page); 1694 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) 1695 call_rcu(&page->rcu_head, kmem_rcu_free); 1696 else 1697 kmem_freepages(cachep, page); 1698 1699 /* 1700 * From now on, we don't use freelist 1701 * although actual page can be freed in rcu context 1702 */ 1703 if (OFF_SLAB(cachep)) 1704 kmem_cache_free(cachep->freelist_cache, freelist); 1705 } 1706 1707 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) 1708 { 1709 struct page *page, *n; 1710 1711 list_for_each_entry_safe(page, n, list, lru) { 1712 list_del(&page->lru); 1713 slab_destroy(cachep, page); 1714 } 1715 } 1716 1717 /** 1718 * calculate_slab_order - calculate size (page order) of slabs 1719 * @cachep: pointer to the cache that is being created 1720 * @size: size of objects to be created in this cache. 1721 * @flags: slab allocation flags 1722 * 1723 * Also calculates the number of objects per slab. 1724 * 1725 * This could be made much more intelligent. For now, try to avoid using 1726 * high order pages for slabs. When the gfp() functions are more friendly 1727 * towards high-order requests, this should be changed. 1728 */ 1729 static size_t calculate_slab_order(struct kmem_cache *cachep, 1730 size_t size, slab_flags_t flags) 1731 { 1732 size_t left_over = 0; 1733 int gfporder; 1734 1735 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1736 unsigned int num; 1737 size_t remainder; 1738 1739 num = cache_estimate(gfporder, size, flags, &remainder); 1740 if (!num) 1741 continue; 1742 1743 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ 1744 if (num > SLAB_OBJ_MAX_NUM) 1745 break; 1746 1747 if (flags & CFLGS_OFF_SLAB) { 1748 struct kmem_cache *freelist_cache; 1749 size_t freelist_size; 1750 1751 freelist_size = num * sizeof(freelist_idx_t); 1752 freelist_cache = kmalloc_slab(freelist_size, 0u); 1753 if (!freelist_cache) 1754 continue; 1755 1756 /* 1757 * Needed to avoid possible looping condition 1758 * in cache_grow_begin() 1759 */ 1760 if (OFF_SLAB(freelist_cache)) 1761 continue; 1762 1763 /* check if off slab has enough benefit */ 1764 if (freelist_cache->size > cachep->size / 2) 1765 continue; 1766 } 1767 1768 /* Found something acceptable - save it away */ 1769 cachep->num = num; 1770 cachep->gfporder = gfporder; 1771 left_over = remainder; 1772 1773 /* 1774 * A VFS-reclaimable slab tends to have most allocations 1775 * as GFP_NOFS and we really don't want to have to be allocating 1776 * higher-order pages when we are unable to shrink dcache. 1777 */ 1778 if (flags & SLAB_RECLAIM_ACCOUNT) 1779 break; 1780 1781 /* 1782 * Large number of objects is good, but very large slabs are 1783 * currently bad for the gfp()s. 1784 */ 1785 if (gfporder >= slab_max_order) 1786 break; 1787 1788 /* 1789 * Acceptable internal fragmentation? 
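		 *
		 * (Stop at the current order once the unused remainder is at
		 * most 1/8th of the slab; e.g. with 4K pages an order-0 slab
		 * is accepted if it wastes no more than 512 bytes.)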
1790 */ 1791 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 1792 break; 1793 } 1794 return left_over; 1795 } 1796 1797 static struct array_cache __percpu *alloc_kmem_cache_cpus( 1798 struct kmem_cache *cachep, int entries, int batchcount) 1799 { 1800 int cpu; 1801 size_t size; 1802 struct array_cache __percpu *cpu_cache; 1803 1804 size = sizeof(void *) * entries + sizeof(struct array_cache); 1805 cpu_cache = __alloc_percpu(size, sizeof(void *)); 1806 1807 if (!cpu_cache) 1808 return NULL; 1809 1810 for_each_possible_cpu(cpu) { 1811 init_arraycache(per_cpu_ptr(cpu_cache, cpu), 1812 entries, batchcount); 1813 } 1814 1815 return cpu_cache; 1816 } 1817 1818 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) 1819 { 1820 if (slab_state >= FULL) 1821 return enable_cpucache(cachep, gfp); 1822 1823 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); 1824 if (!cachep->cpu_cache) 1825 return 1; 1826 1827 if (slab_state == DOWN) { 1828 /* Creation of first cache (kmem_cache). */ 1829 set_up_node(kmem_cache, CACHE_CACHE); 1830 } else if (slab_state == PARTIAL) { 1831 /* For kmem_cache_node */ 1832 set_up_node(cachep, SIZE_NODE); 1833 } else { 1834 int node; 1835 1836 for_each_online_node(node) { 1837 cachep->node[node] = kmalloc_node( 1838 sizeof(struct kmem_cache_node), gfp, node); 1839 BUG_ON(!cachep->node[node]); 1840 kmem_cache_node_init(cachep->node[node]); 1841 } 1842 } 1843 1844 cachep->node[numa_mem_id()]->next_reap = 1845 jiffies + REAPTIMEOUT_NODE + 1846 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1847 1848 cpu_cache_get(cachep)->avail = 0; 1849 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 1850 cpu_cache_get(cachep)->batchcount = 1; 1851 cpu_cache_get(cachep)->touched = 0; 1852 cachep->batchcount = 1; 1853 cachep->limit = BOOT_CPUCACHE_ENTRIES; 1854 return 0; 1855 } 1856 1857 slab_flags_t kmem_cache_flags(unsigned int object_size, 1858 slab_flags_t flags, const char *name, 1859 void (*ctor)(void *)) 1860 { 1861 return flags; 1862 } 1863 1864 struct kmem_cache * 1865 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 1866 slab_flags_t flags, void (*ctor)(void *)) 1867 { 1868 struct kmem_cache *cachep; 1869 1870 cachep = find_mergeable(size, align, flags, name, ctor); 1871 if (cachep) { 1872 cachep->refcount++; 1873 1874 /* 1875 * Adjust the object sizes so that we clear 1876 * the complete object on kzalloc. 1877 */ 1878 cachep->object_size = max_t(int, cachep->object_size, size); 1879 } 1880 return cachep; 1881 } 1882 1883 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, 1884 size_t size, slab_flags_t flags) 1885 { 1886 size_t left; 1887 1888 cachep->num = 0; 1889 1890 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) 1891 return false; 1892 1893 left = calculate_slab_order(cachep, size, 1894 flags | CFLGS_OBJFREELIST_SLAB); 1895 if (!cachep->num) 1896 return false; 1897 1898 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) 1899 return false; 1900 1901 cachep->colour = left / cachep->colour_off; 1902 1903 return true; 1904 } 1905 1906 static bool set_off_slab_cache(struct kmem_cache *cachep, 1907 size_t size, slab_flags_t flags) 1908 { 1909 size_t left; 1910 1911 cachep->num = 0; 1912 1913 /* 1914 * Always use on-slab management when SLAB_NOLEAKTRACE 1915 * to avoid recursive calls into kmemleak. 1916 */ 1917 if (flags & SLAB_NOLEAKTRACE) 1918 return false; 1919 1920 /* 1921 * Size is large, assume best to place the slab management obj 1922 * off-slab (should allow better packing of objs). 
 */
	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
	if (!cachep->num)
		return false;

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (left >= cachep->num * sizeof(freelist_idx_t))
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

static bool set_on_slab_cache(struct kmem_cache *cachep,
			size_t size, slab_flags_t flags)
{
	size_t left;

	cachep->num = 0;

	left = calculate_slab_order(cachep, size, flags);
	if (!cachep->num)
		return false;

	cachep->colour = left / cachep->colour_off;

	return true;
}

/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Return: 0 on success, a negative error code on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * cachep->ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
	size_t ralign = BYTES_PER_WORD;
	gfp_t gfp;
	int err;
	unsigned int size = cachep->size;

#if DEBUG
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_TYPESAFE_BY_RCU))
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	size = ALIGN(size, BYTES_PER_WORD);

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size = ALIGN(size, REDZONE_ALIGN);
	}

	/* 3) caller mandated alignment */
	if (ralign < cachep->align) {
		ralign = cachep->align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	cachep->align = ralign;
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment.
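	 *
	 * The left-over space in a slab is later used for "colouring":
	 * successive slabs place their first object at offsets 0, colour_off,
	 * 2 * colour_off, ... (cachep->colour slots in total), so objects of
	 * different slabs do not all compete for the same cache lines.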
 */
2027 if (cachep->colour_off < cachep->align)
2028 cachep->colour_off = cachep->align;
2029
2030 if (slab_is_available())
2031 gfp = GFP_KERNEL;
2032 else
2033 gfp = GFP_NOWAIT;
2034
2035 #if DEBUG
2036
2037 /*
2038 * Both debugging options require word-alignment which is calculated
2039 * into align above.
2040 */
2041 if (flags & SLAB_RED_ZONE) {
2042 /* add space for red zone words */
2043 cachep->obj_offset += sizeof(unsigned long long);
2044 size += 2 * sizeof(unsigned long long);
2045 }
2046 if (flags & SLAB_STORE_USER) {
2047 /* user store requires one word storage behind the end of
2048 * the real object. But if the second red zone needs to be
2049 * aligned to 64 bits, we must allow that much space.
2050 */
2051 if (flags & SLAB_RED_ZONE)
2052 size += REDZONE_ALIGN;
2053 else
2054 size += BYTES_PER_WORD;
2055 }
2056 #endif
2057
2058 kasan_cache_create(cachep, &size, &flags);
2059
2060 size = ALIGN(size, cachep->align);
2061 /*
2062 * We should restrict the number of objects in a slab to implement
2063 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
2064 */
2065 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2066 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2067
2068 #if DEBUG
2069 /*
2070 * To activate debug pagealloc, off-slab management is a necessary
2071 * requirement. In the early phase of initialization the small kmalloc
2072 * caches are not yet set up, so an off-slab freelist could not be
2073 * allocated for them. Checking size >= 256 guarantees that every small
2074 * cache needed here has already been initialized by this point.
2075 */
2076 if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2077 size >= 256 && cachep->object_size > cache_line_size()) {
2078 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2079 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2080
2081 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2082 flags |= CFLGS_OFF_SLAB;
2083 cachep->obj_offset += tmp_size - size;
2084 size = tmp_size;
2085 goto done;
2086 }
2087 }
2088 }
2089 #endif
2090
2091 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2092 flags |= CFLGS_OBJFREELIST_SLAB;
2093 goto done;
2094 }
2095
2096 if (set_off_slab_cache(cachep, size, flags)) {
2097 flags |= CFLGS_OFF_SLAB;
2098 goto done;
2099 }
2100
2101 if (set_on_slab_cache(cachep, size, flags))
2102 goto done;
2103
2104 return -E2BIG;
2105
2106 done:
2107 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2108 cachep->flags = flags;
2109 cachep->allocflags = __GFP_COMP;
2110 if (flags & SLAB_CACHE_DMA)
2111 cachep->allocflags |= GFP_DMA;
2112 if (flags & SLAB_RECLAIM_ACCOUNT)
2113 cachep->allocflags |= __GFP_RECLAIMABLE;
2114 cachep->size = size;
2115 cachep->reciprocal_buffer_size = reciprocal_value(size);
2116
2117 #if DEBUG
2118 /*
2119 * If we're going to use the generic kernel_map_pages()
2120 * poisoning, then it's going to smash the contents of
2121 * the redzone and userword anyhow, so switch them off.
2122 */ 2123 if (IS_ENABLED(CONFIG_PAGE_POISONING) && 2124 (cachep->flags & SLAB_POISON) && 2125 is_debug_pagealloc_cache(cachep)) 2126 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2127 #endif 2128 2129 if (OFF_SLAB(cachep)) { 2130 cachep->freelist_cache = 2131 kmalloc_slab(cachep->freelist_size, 0u); 2132 } 2133 2134 err = setup_cpu_cache(cachep, gfp); 2135 if (err) { 2136 __kmem_cache_release(cachep); 2137 return err; 2138 } 2139 2140 return 0; 2141 } 2142 2143 #if DEBUG 2144 static void check_irq_off(void) 2145 { 2146 BUG_ON(!irqs_disabled()); 2147 } 2148 2149 static void check_irq_on(void) 2150 { 2151 BUG_ON(irqs_disabled()); 2152 } 2153 2154 static void check_mutex_acquired(void) 2155 { 2156 BUG_ON(!mutex_is_locked(&slab_mutex)); 2157 } 2158 2159 static void check_spinlock_acquired(struct kmem_cache *cachep) 2160 { 2161 #ifdef CONFIG_SMP 2162 check_irq_off(); 2163 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); 2164 #endif 2165 } 2166 2167 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2168 { 2169 #ifdef CONFIG_SMP 2170 check_irq_off(); 2171 assert_spin_locked(&get_node(cachep, node)->list_lock); 2172 #endif 2173 } 2174 2175 #else 2176 #define check_irq_off() do { } while(0) 2177 #define check_irq_on() do { } while(0) 2178 #define check_mutex_acquired() do { } while(0) 2179 #define check_spinlock_acquired(x) do { } while(0) 2180 #define check_spinlock_acquired_node(x, y) do { } while(0) 2181 #endif 2182 2183 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, 2184 int node, bool free_all, struct list_head *list) 2185 { 2186 int tofree; 2187 2188 if (!ac || !ac->avail) 2189 return; 2190 2191 tofree = free_all ? ac->avail : (ac->limit + 4) / 5; 2192 if (tofree > ac->avail) 2193 tofree = (ac->avail + 1) / 2; 2194 2195 free_block(cachep, ac->entry, tofree, node, list); 2196 ac->avail -= tofree; 2197 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); 2198 } 2199 2200 static void do_drain(void *arg) 2201 { 2202 struct kmem_cache *cachep = arg; 2203 struct array_cache *ac; 2204 int node = numa_mem_id(); 2205 struct kmem_cache_node *n; 2206 LIST_HEAD(list); 2207 2208 check_irq_off(); 2209 ac = cpu_cache_get(cachep); 2210 n = get_node(cachep, node); 2211 spin_lock(&n->list_lock); 2212 free_block(cachep, ac->entry, ac->avail, node, &list); 2213 spin_unlock(&n->list_lock); 2214 slabs_destroy(cachep, &list); 2215 ac->avail = 0; 2216 } 2217 2218 static void drain_cpu_caches(struct kmem_cache *cachep) 2219 { 2220 struct kmem_cache_node *n; 2221 int node; 2222 LIST_HEAD(list); 2223 2224 on_each_cpu(do_drain, cachep, 1); 2225 check_irq_on(); 2226 for_each_kmem_cache_node(cachep, node, n) 2227 if (n->alien) 2228 drain_alien_cache(cachep, n->alien); 2229 2230 for_each_kmem_cache_node(cachep, node, n) { 2231 spin_lock_irq(&n->list_lock); 2232 drain_array_locked(cachep, n->shared, node, true, &list); 2233 spin_unlock_irq(&n->list_lock); 2234 2235 slabs_destroy(cachep, &list); 2236 } 2237 } 2238 2239 /* 2240 * Remove slabs from the list of free slabs. 2241 * Specify the number of slabs to drain in tofree. 2242 * 2243 * Returns the actual number of slabs released. 
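 *
 * For example, __kmem_cache_shrink() below passes tofree == INT_MAX to
 * release every free slab on a node, while cache_reap() asks for only a
 * small fraction of the node's free_limit per pass.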
2244 */
2245 static int drain_freelist(struct kmem_cache *cache,
2246 struct kmem_cache_node *n, int tofree)
2247 {
2248 struct list_head *p;
2249 int nr_freed;
2250 struct page *page;
2251
2252 nr_freed = 0;
2253 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2254
2255 spin_lock_irq(&n->list_lock);
2256 p = n->slabs_free.prev;
2257 if (p == &n->slabs_free) {
2258 spin_unlock_irq(&n->list_lock);
2259 goto out;
2260 }
2261
2262 page = list_entry(p, struct page, lru);
2263 list_del(&page->lru);
2264 n->free_slabs--;
2265 n->total_slabs--;
2266 /*
2267 * Safe to drop the lock. The slab is no longer linked
2268 * to the cache.
2269 */
2270 n->free_objects -= cache->num;
2271 spin_unlock_irq(&n->list_lock);
2272 slab_destroy(cache, page);
2273 nr_freed++;
2274 }
2275 out:
2276 return nr_freed;
2277 }
2278
2279 bool __kmem_cache_empty(struct kmem_cache *s)
2280 {
2281 int node;
2282 struct kmem_cache_node *n;
2283
2284 for_each_kmem_cache_node(s, node, n)
2285 if (!list_empty(&n->slabs_full) ||
2286 !list_empty(&n->slabs_partial))
2287 return false;
2288 return true;
2289 }
2290
2291 int __kmem_cache_shrink(struct kmem_cache *cachep)
2292 {
2293 int ret = 0;
2294 int node;
2295 struct kmem_cache_node *n;
2296
2297 drain_cpu_caches(cachep);
2298
2299 check_irq_on();
2300 for_each_kmem_cache_node(cachep, node, n) {
2301 drain_freelist(cachep, n, INT_MAX);
2302
2303 ret += !list_empty(&n->slabs_full) ||
2304 !list_empty(&n->slabs_partial);
2305 }
2306 return (ret ? 1 : 0);
2307 }
2308
2309 #ifdef CONFIG_MEMCG
2310 void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2311 {
2312 __kmem_cache_shrink(cachep);
2313 }
2314 #endif
2315
2316 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2317 {
2318 return __kmem_cache_shrink(cachep);
2319 }
2320
2321 void __kmem_cache_release(struct kmem_cache *cachep)
2322 {
2323 int i;
2324 struct kmem_cache_node *n;
2325
2326 cache_random_seq_destroy(cachep);
2327
2328 free_percpu(cachep->cpu_cache);
2329
2330 /* NUMA: free the node structures */
2331 for_each_kmem_cache_node(cachep, i, n) {
2332 kfree(n->shared);
2333 free_alien_cache(n->alien);
2334 kfree(n);
2335 cachep->node[i] = NULL;
2336 }
2337 }
2338
2339 /*
2340 * Get the memory for a slab management obj.
2341 *
2342 * For a slab cache whose slab descriptor is off-slab, the descriptor
2343 * can't come from the same cache which is being created, because that
2344 * would defer the creation of the kmalloc_{dma,}_cache of size
2345 * sizeof(slab descriptor) to this point. We would then call down to
2346 * __kmem_cache_create(), which in turn looks up the desired-size cache
2347 * in the kmalloc_{dma,}_caches.
2348 * This is a "chicken-and-egg" problem.
2349 *
2350 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2351 * which are all initialized during kmem_cache_init().
2352 */
2353 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2354 struct page *page, int colour_off,
2355 gfp_t local_flags, int nodeid)
2356 {
2357 void *freelist;
2358 void *addr = page_address(page);
2359
2360 page->s_mem = kasan_reset_tag(addr) + colour_off;
2361 page->active = 0;
2362
2363 if (OBJFREELIST_SLAB(cachep))
2364 freelist = NULL;
2365 else if (OFF_SLAB(cachep)) {
2366 /* Slab management obj is off-slab.
 */
2367 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2368 local_flags, nodeid);
2369 if (!freelist)
2370 return NULL;
2371 } else {
2372 /* We will use last bytes at the slab for freelist */
2373 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2374 cachep->freelist_size;
2375 }
2376
2377 return freelist;
2378 }
2379
2380 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2381 {
2382 return ((freelist_idx_t *)page->freelist)[idx];
2383 }
2384
2385 static inline void set_free_obj(struct page *page,
2386 unsigned int idx, freelist_idx_t val)
2387 {
2388 ((freelist_idx_t *)(page->freelist))[idx] = val;
2389 }
2390
2391 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2392 {
2393 #if DEBUG
2394 int i;
2395
2396 for (i = 0; i < cachep->num; i++) {
2397 void *objp = index_to_obj(cachep, page, i);
2398
2399 if (cachep->flags & SLAB_STORE_USER)
2400 *dbg_userword(cachep, objp) = NULL;
2401
2402 if (cachep->flags & SLAB_RED_ZONE) {
2403 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2404 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2405 }
2406 /*
2407 * Constructors are not allowed to allocate memory from the same
2408 * cache which they are a constructor for. Otherwise, deadlock.
2409 * They must also be threaded.
2410 */
2411 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2412 kasan_unpoison_object_data(cachep,
2413 objp + obj_offset(cachep));
2414 cachep->ctor(objp + obj_offset(cachep));
2415 kasan_poison_object_data(
2416 cachep, objp + obj_offset(cachep));
2417 }
2418
2419 if (cachep->flags & SLAB_RED_ZONE) {
2420 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2421 slab_error(cachep, "constructor overwrote the end of an object");
2422 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2423 slab_error(cachep, "constructor overwrote the start of an object");
2424 }
2425 /* need to poison the objs? */
2426 if (cachep->flags & SLAB_POISON) {
2427 poison_obj(cachep, objp, POISON_FREE);
2428 slab_kernel_map(cachep, objp, 0, 0);
2429 }
2430 }
2431 #endif
2432 }
2433
2434 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2435 /* Hold information during a freelist initialization */
2436 union freelist_init_state {
2437 struct {
2438 unsigned int pos;
2439 unsigned int *list;
2440 unsigned int count;
2441 };
2442 struct rnd_state rnd_state;
2443 };
2444
2445 /*
2446 * Initialize the state based on the randomization method available.
2447 * Return true if the pre-computed list is available, false otherwise.
2448 */ 2449 static bool freelist_state_initialize(union freelist_init_state *state, 2450 struct kmem_cache *cachep, 2451 unsigned int count) 2452 { 2453 bool ret; 2454 unsigned int rand; 2455 2456 /* Use best entropy available to define a random shift */ 2457 rand = get_random_int(); 2458 2459 /* Use a random state if the pre-computed list is not available */ 2460 if (!cachep->random_seq) { 2461 prandom_seed_state(&state->rnd_state, rand); 2462 ret = false; 2463 } else { 2464 state->list = cachep->random_seq; 2465 state->count = count; 2466 state->pos = rand % count; 2467 ret = true; 2468 } 2469 return ret; 2470 } 2471 2472 /* Get the next entry on the list and randomize it using a random shift */ 2473 static freelist_idx_t next_random_slot(union freelist_init_state *state) 2474 { 2475 if (state->pos >= state->count) 2476 state->pos = 0; 2477 return state->list[state->pos++]; 2478 } 2479 2480 /* Swap two freelist entries */ 2481 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b) 2482 { 2483 swap(((freelist_idx_t *)page->freelist)[a], 2484 ((freelist_idx_t *)page->freelist)[b]); 2485 } 2486 2487 /* 2488 * Shuffle the freelist initialization state based on pre-computed lists. 2489 * return true if the list was successfully shuffled, false otherwise. 2490 */ 2491 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) 2492 { 2493 unsigned int objfreelist = 0, i, rand, count = cachep->num; 2494 union freelist_init_state state; 2495 bool precomputed; 2496 2497 if (count < 2) 2498 return false; 2499 2500 precomputed = freelist_state_initialize(&state, cachep, count); 2501 2502 /* Take a random entry as the objfreelist */ 2503 if (OBJFREELIST_SLAB(cachep)) { 2504 if (!precomputed) 2505 objfreelist = count - 1; 2506 else 2507 objfreelist = next_random_slot(&state); 2508 page->freelist = index_to_obj(cachep, page, objfreelist) + 2509 obj_offset(cachep); 2510 count--; 2511 } 2512 2513 /* 2514 * On early boot, generate the list dynamically. 2515 * Later use a pre-computed list for speed. 
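 *
 * As an illustration (count value made up): with count == 4 the boot-time
 * path first writes the identity freelist {0, 1, 2, 3} and then performs a
 * Fisher-Yates shuffle, swapping each index i from 3 down to 1 with a
 * randomly chosen slot in [0, i]. The pre-computed path instead copies
 * entries from cachep->random_seq, starting at a random position and
 * wrapping around.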
2516 */ 2517 if (!precomputed) { 2518 for (i = 0; i < count; i++) 2519 set_free_obj(page, i, i); 2520 2521 /* Fisher-Yates shuffle */ 2522 for (i = count - 1; i > 0; i--) { 2523 rand = prandom_u32_state(&state.rnd_state); 2524 rand %= (i + 1); 2525 swap_free_obj(page, i, rand); 2526 } 2527 } else { 2528 for (i = 0; i < count; i++) 2529 set_free_obj(page, i, next_random_slot(&state)); 2530 } 2531 2532 if (OBJFREELIST_SLAB(cachep)) 2533 set_free_obj(page, cachep->num - 1, objfreelist); 2534 2535 return true; 2536 } 2537 #else 2538 static inline bool shuffle_freelist(struct kmem_cache *cachep, 2539 struct page *page) 2540 { 2541 return false; 2542 } 2543 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2544 2545 static void cache_init_objs(struct kmem_cache *cachep, 2546 struct page *page) 2547 { 2548 int i; 2549 void *objp; 2550 bool shuffled; 2551 2552 cache_init_objs_debug(cachep, page); 2553 2554 /* Try to randomize the freelist if enabled */ 2555 shuffled = shuffle_freelist(cachep, page); 2556 2557 if (!shuffled && OBJFREELIST_SLAB(cachep)) { 2558 page->freelist = index_to_obj(cachep, page, cachep->num - 1) + 2559 obj_offset(cachep); 2560 } 2561 2562 for (i = 0; i < cachep->num; i++) { 2563 objp = index_to_obj(cachep, page, i); 2564 objp = kasan_init_slab_obj(cachep, objp); 2565 2566 /* constructor could break poison info */ 2567 if (DEBUG == 0 && cachep->ctor) { 2568 kasan_unpoison_object_data(cachep, objp); 2569 cachep->ctor(objp); 2570 kasan_poison_object_data(cachep, objp); 2571 } 2572 2573 if (!shuffled) 2574 set_free_obj(page, i, i); 2575 } 2576 } 2577 2578 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page) 2579 { 2580 void *objp; 2581 2582 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); 2583 page->active++; 2584 2585 #if DEBUG 2586 if (cachep->flags & SLAB_STORE_USER) 2587 set_store_user_dirty(cachep); 2588 #endif 2589 2590 return objp; 2591 } 2592 2593 static void slab_put_obj(struct kmem_cache *cachep, 2594 struct page *page, void *objp) 2595 { 2596 unsigned int objnr = obj_to_index(cachep, page, objp); 2597 #if DEBUG 2598 unsigned int i; 2599 2600 /* Verify double free bug */ 2601 for (i = page->active; i < cachep->num; i++) { 2602 if (get_free_obj(page, i) == objnr) { 2603 pr_err("slab: double free detected in cache '%s', objp %px\n", 2604 cachep->name, objp); 2605 BUG(); 2606 } 2607 } 2608 #endif 2609 page->active--; 2610 if (!page->freelist) 2611 page->freelist = objp + obj_offset(cachep); 2612 2613 set_free_obj(page, page->active, objnr); 2614 } 2615 2616 /* 2617 * Map pages beginning at addr to the given cache and slab. This is required 2618 * for the slab allocator to be able to lookup the cache and slab of a 2619 * virtual address for kfree, ksize, and slab debugging. 2620 */ 2621 static void slab_map_pages(struct kmem_cache *cache, struct page *page, 2622 void *freelist) 2623 { 2624 page->slab_cache = cache; 2625 page->freelist = freelist; 2626 } 2627 2628 /* 2629 * Grow (by 1) the number of slabs within a cache. This is called by 2630 * kmem_cache_alloc() when there are no active objs left in a cache. 2631 */ 2632 static struct page *cache_grow_begin(struct kmem_cache *cachep, 2633 gfp_t flags, int nodeid) 2634 { 2635 void *freelist; 2636 size_t offset; 2637 gfp_t local_flags; 2638 int page_node; 2639 struct kmem_cache_node *n; 2640 struct page *page; 2641 2642 /* 2643 * Be lazy and only check for valid flags here, keeping it out of the 2644 * critical path in kmem_cache_alloc(). 
2645 */ 2646 if (unlikely(flags & GFP_SLAB_BUG_MASK)) { 2647 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; 2648 flags &= ~GFP_SLAB_BUG_MASK; 2649 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", 2650 invalid_mask, &invalid_mask, flags, &flags); 2651 dump_stack(); 2652 } 2653 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 2654 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2655 2656 check_irq_off(); 2657 if (gfpflags_allow_blocking(local_flags)) 2658 local_irq_enable(); 2659 2660 /* 2661 * Get mem for the objs. Attempt to allocate a physical page from 2662 * 'nodeid'. 2663 */ 2664 page = kmem_getpages(cachep, local_flags, nodeid); 2665 if (!page) 2666 goto failed; 2667 2668 page_node = page_to_nid(page); 2669 n = get_node(cachep, page_node); 2670 2671 /* Get colour for the slab, and cal the next value. */ 2672 n->colour_next++; 2673 if (n->colour_next >= cachep->colour) 2674 n->colour_next = 0; 2675 2676 offset = n->colour_next; 2677 if (offset >= cachep->colour) 2678 offset = 0; 2679 2680 offset *= cachep->colour_off; 2681 2682 /* Get slab management. */ 2683 freelist = alloc_slabmgmt(cachep, page, offset, 2684 local_flags & ~GFP_CONSTRAINT_MASK, page_node); 2685 if (OFF_SLAB(cachep) && !freelist) 2686 goto opps1; 2687 2688 slab_map_pages(cachep, page, freelist); 2689 2690 kasan_poison_slab(page); 2691 cache_init_objs(cachep, page); 2692 2693 if (gfpflags_allow_blocking(local_flags)) 2694 local_irq_disable(); 2695 2696 return page; 2697 2698 opps1: 2699 kmem_freepages(cachep, page); 2700 failed: 2701 if (gfpflags_allow_blocking(local_flags)) 2702 local_irq_disable(); 2703 return NULL; 2704 } 2705 2706 static void cache_grow_end(struct kmem_cache *cachep, struct page *page) 2707 { 2708 struct kmem_cache_node *n; 2709 void *list = NULL; 2710 2711 check_irq_off(); 2712 2713 if (!page) 2714 return; 2715 2716 INIT_LIST_HEAD(&page->lru); 2717 n = get_node(cachep, page_to_nid(page)); 2718 2719 spin_lock(&n->list_lock); 2720 n->total_slabs++; 2721 if (!page->active) { 2722 list_add_tail(&page->lru, &(n->slabs_free)); 2723 n->free_slabs++; 2724 } else 2725 fixup_slab_list(cachep, n, page, &list); 2726 2727 STATS_INC_GROWN(cachep); 2728 n->free_objects += cachep->num - page->active; 2729 spin_unlock(&n->list_lock); 2730 2731 fixup_objfreelist_debug(cachep, &list); 2732 } 2733 2734 #if DEBUG 2735 2736 /* 2737 * Perform extra freeing checks: 2738 * - detect bad pointers. 2739 * - POISON/RED_ZONE checking 2740 */ 2741 static void kfree_debugcheck(const void *objp) 2742 { 2743 if (!virt_addr_valid(objp)) { 2744 pr_err("kfree_debugcheck: out of range ptr %lxh\n", 2745 (unsigned long)objp); 2746 BUG(); 2747 } 2748 } 2749 2750 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2751 { 2752 unsigned long long redzone1, redzone2; 2753 2754 redzone1 = *dbg_redzone1(cache, obj); 2755 redzone2 = *dbg_redzone2(cache, obj); 2756 2757 /* 2758 * Redzone is ok. 
2759 */ 2760 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2761 return; 2762 2763 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2764 slab_error(cache, "double free detected"); 2765 else 2766 slab_error(cache, "memory outside object was overwritten"); 2767 2768 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 2769 obj, redzone1, redzone2); 2770 } 2771 2772 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2773 unsigned long caller) 2774 { 2775 unsigned int objnr; 2776 struct page *page; 2777 2778 BUG_ON(virt_to_cache(objp) != cachep); 2779 2780 objp -= obj_offset(cachep); 2781 kfree_debugcheck(objp); 2782 page = virt_to_head_page(objp); 2783 2784 if (cachep->flags & SLAB_RED_ZONE) { 2785 verify_redzone_free(cachep, objp); 2786 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2787 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2788 } 2789 if (cachep->flags & SLAB_STORE_USER) { 2790 set_store_user_dirty(cachep); 2791 *dbg_userword(cachep, objp) = (void *)caller; 2792 } 2793 2794 objnr = obj_to_index(cachep, page, objp); 2795 2796 BUG_ON(objnr >= cachep->num); 2797 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2798 2799 if (cachep->flags & SLAB_POISON) { 2800 poison_obj(cachep, objp, POISON_FREE); 2801 slab_kernel_map(cachep, objp, 0, caller); 2802 } 2803 return objp; 2804 } 2805 2806 #else 2807 #define kfree_debugcheck(x) do { } while(0) 2808 #define cache_free_debugcheck(x,objp,z) (objp) 2809 #endif 2810 2811 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, 2812 void **list) 2813 { 2814 #if DEBUG 2815 void *next = *list; 2816 void *objp; 2817 2818 while (next) { 2819 objp = next - obj_offset(cachep); 2820 next = *(void **)next; 2821 poison_obj(cachep, objp, POISON_FREE); 2822 } 2823 #endif 2824 } 2825 2826 static inline void fixup_slab_list(struct kmem_cache *cachep, 2827 struct kmem_cache_node *n, struct page *page, 2828 void **list) 2829 { 2830 /* move slabp to correct slabp list: */ 2831 list_del(&page->lru); 2832 if (page->active == cachep->num) { 2833 list_add(&page->lru, &n->slabs_full); 2834 if (OBJFREELIST_SLAB(cachep)) { 2835 #if DEBUG 2836 /* Poisoning will be done without holding the lock */ 2837 if (cachep->flags & SLAB_POISON) { 2838 void **objp = page->freelist; 2839 2840 *objp = *list; 2841 *list = objp; 2842 } 2843 #endif 2844 page->freelist = NULL; 2845 } 2846 } else 2847 list_add(&page->lru, &n->slabs_partial); 2848 } 2849 2850 /* Try to find non-pfmemalloc slab if needed */ 2851 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n, 2852 struct page *page, bool pfmemalloc) 2853 { 2854 if (!page) 2855 return NULL; 2856 2857 if (pfmemalloc) 2858 return page; 2859 2860 if (!PageSlabPfmemalloc(page)) 2861 return page; 2862 2863 /* No need to keep pfmemalloc slab if we have enough free objects */ 2864 if (n->free_objects > n->free_limit) { 2865 ClearPageSlabPfmemalloc(page); 2866 return page; 2867 } 2868 2869 /* Move pfmemalloc slab to the end of list to speed up next search */ 2870 list_del(&page->lru); 2871 if (!page->active) { 2872 list_add_tail(&page->lru, &n->slabs_free); 2873 n->free_slabs++; 2874 } else 2875 list_add_tail(&page->lru, &n->slabs_partial); 2876 2877 list_for_each_entry(page, &n->slabs_partial, lru) { 2878 if (!PageSlabPfmemalloc(page)) 2879 return page; 2880 } 2881 2882 n->free_touched = 1; 2883 list_for_each_entry(page, &n->slabs_free, lru) { 2884 if (!PageSlabPfmemalloc(page)) { 2885 n->free_slabs--; 2886 return page; 2887 } 2888 } 2889 2890 return NULL; 2891 } 2892 
2893 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) 2894 { 2895 struct page *page; 2896 2897 assert_spin_locked(&n->list_lock); 2898 page = list_first_entry_or_null(&n->slabs_partial, struct page, lru); 2899 if (!page) { 2900 n->free_touched = 1; 2901 page = list_first_entry_or_null(&n->slabs_free, struct page, 2902 lru); 2903 if (page) 2904 n->free_slabs--; 2905 } 2906 2907 if (sk_memalloc_socks()) 2908 page = get_valid_first_slab(n, page, pfmemalloc); 2909 2910 return page; 2911 } 2912 2913 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, 2914 struct kmem_cache_node *n, gfp_t flags) 2915 { 2916 struct page *page; 2917 void *obj; 2918 void *list = NULL; 2919 2920 if (!gfp_pfmemalloc_allowed(flags)) 2921 return NULL; 2922 2923 spin_lock(&n->list_lock); 2924 page = get_first_slab(n, true); 2925 if (!page) { 2926 spin_unlock(&n->list_lock); 2927 return NULL; 2928 } 2929 2930 obj = slab_get_obj(cachep, page); 2931 n->free_objects--; 2932 2933 fixup_slab_list(cachep, n, page, &list); 2934 2935 spin_unlock(&n->list_lock); 2936 fixup_objfreelist_debug(cachep, &list); 2937 2938 return obj; 2939 } 2940 2941 /* 2942 * Slab list should be fixed up by fixup_slab_list() for existing slab 2943 * or cache_grow_end() for new slab 2944 */ 2945 static __always_inline int alloc_block(struct kmem_cache *cachep, 2946 struct array_cache *ac, struct page *page, int batchcount) 2947 { 2948 /* 2949 * There must be at least one object available for 2950 * allocation. 2951 */ 2952 BUG_ON(page->active >= cachep->num); 2953 2954 while (page->active < cachep->num && batchcount--) { 2955 STATS_INC_ALLOCED(cachep); 2956 STATS_INC_ACTIVE(cachep); 2957 STATS_SET_HIGH(cachep); 2958 2959 ac->entry[ac->avail++] = slab_get_obj(cachep, page); 2960 } 2961 2962 return batchcount; 2963 } 2964 2965 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2966 { 2967 int batchcount; 2968 struct kmem_cache_node *n; 2969 struct array_cache *ac, *shared; 2970 int node; 2971 void *list = NULL; 2972 struct page *page; 2973 2974 check_irq_off(); 2975 node = numa_mem_id(); 2976 2977 ac = cpu_cache_get(cachep); 2978 batchcount = ac->batchcount; 2979 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2980 /* 2981 * If there was little recent activity on this cache, then 2982 * perform only a partial refill. Otherwise we could generate 2983 * refill bouncing. 2984 */ 2985 batchcount = BATCHREFILL_LIMIT; 2986 } 2987 n = get_node(cachep, node); 2988 2989 BUG_ON(ac->avail > 0 || !n); 2990 shared = READ_ONCE(n->shared); 2991 if (!n->free_objects && (!shared || !shared->avail)) 2992 goto direct_grow; 2993 2994 spin_lock(&n->list_lock); 2995 shared = READ_ONCE(n->shared); 2996 2997 /* See if we can refill from the shared array */ 2998 if (shared && transfer_objects(ac, shared, batchcount)) { 2999 shared->touched = 1; 3000 goto alloc_done; 3001 } 3002 3003 while (batchcount > 0) { 3004 /* Get slab alloc is to come from. 
*/ 3005 page = get_first_slab(n, false); 3006 if (!page) 3007 goto must_grow; 3008 3009 check_spinlock_acquired(cachep); 3010 3011 batchcount = alloc_block(cachep, ac, page, batchcount); 3012 fixup_slab_list(cachep, n, page, &list); 3013 } 3014 3015 must_grow: 3016 n->free_objects -= ac->avail; 3017 alloc_done: 3018 spin_unlock(&n->list_lock); 3019 fixup_objfreelist_debug(cachep, &list); 3020 3021 direct_grow: 3022 if (unlikely(!ac->avail)) { 3023 /* Check if we can use obj in pfmemalloc slab */ 3024 if (sk_memalloc_socks()) { 3025 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); 3026 3027 if (obj) 3028 return obj; 3029 } 3030 3031 page = cache_grow_begin(cachep, gfp_exact_node(flags), node); 3032 3033 /* 3034 * cache_grow_begin() can reenable interrupts, 3035 * then ac could change. 3036 */ 3037 ac = cpu_cache_get(cachep); 3038 if (!ac->avail && page) 3039 alloc_block(cachep, ac, page, batchcount); 3040 cache_grow_end(cachep, page); 3041 3042 if (!ac->avail) 3043 return NULL; 3044 } 3045 ac->touched = 1; 3046 3047 return ac->entry[--ac->avail]; 3048 } 3049 3050 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3051 gfp_t flags) 3052 { 3053 might_sleep_if(gfpflags_allow_blocking(flags)); 3054 } 3055 3056 #if DEBUG 3057 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3058 gfp_t flags, void *objp, unsigned long caller) 3059 { 3060 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 3061 if (!objp) 3062 return objp; 3063 if (cachep->flags & SLAB_POISON) { 3064 check_poison_obj(cachep, objp); 3065 slab_kernel_map(cachep, objp, 1, 0); 3066 poison_obj(cachep, objp, POISON_INUSE); 3067 } 3068 if (cachep->flags & SLAB_STORE_USER) 3069 *dbg_userword(cachep, objp) = (void *)caller; 3070 3071 if (cachep->flags & SLAB_RED_ZONE) { 3072 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3073 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3074 slab_error(cachep, "double free, or memory outside object was overwritten"); 3075 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 3076 objp, *dbg_redzone1(cachep, objp), 3077 *dbg_redzone2(cachep, objp)); 3078 } 3079 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3080 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3081 } 3082 3083 objp += obj_offset(cachep); 3084 if (cachep->ctor && cachep->flags & SLAB_POISON) 3085 cachep->ctor(objp); 3086 if (ARCH_SLAB_MINALIGN && 3087 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 3088 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3089 objp, (int)ARCH_SLAB_MINALIGN); 3090 } 3091 return objp; 3092 } 3093 #else 3094 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3095 #endif 3096 3097 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3098 { 3099 void *objp; 3100 struct array_cache *ac; 3101 3102 check_irq_off(); 3103 3104 ac = cpu_cache_get(cachep); 3105 if (likely(ac->avail)) { 3106 ac->touched = 1; 3107 objp = ac->entry[--ac->avail]; 3108 3109 STATS_INC_ALLOCHIT(cachep); 3110 goto out; 3111 } 3112 3113 STATS_INC_ALLOCMISS(cachep); 3114 objp = cache_alloc_refill(cachep, flags); 3115 /* 3116 * the 'ac' may be updated by cache_alloc_refill(), 3117 * and kmemleak_erase() requires its correct value. 3118 */ 3119 ac = cpu_cache_get(cachep); 3120 3121 out: 3122 /* 3123 * To avoid a false negative, if an object that is in one of the 3124 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 3125 * treat the array pointers as a reference to the object. 
3126 */
3127 if (objp)
3128 kmemleak_erase(&ac->entry[ac->avail]);
3129 return objp;
3130 }
3131
3132 #ifdef CONFIG_NUMA
3133 /*
3134 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3135 *
3136 * If we are in_interrupt, then process context, including cpusets and
3137 * mempolicy, may not apply and should not be used for allocation policy.
3138 */
3139 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3140 {
3141 int nid_alloc, nid_here;
3142
3143 if (in_interrupt() || (flags & __GFP_THISNODE))
3144 return NULL;
3145 nid_alloc = nid_here = numa_mem_id();
3146 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3147 nid_alloc = cpuset_slab_spread_node();
3148 else if (current->mempolicy)
3149 nid_alloc = mempolicy_slab_node();
3150 if (nid_alloc != nid_here)
3151 return ____cache_alloc_node(cachep, flags, nid_alloc);
3152 return NULL;
3153 }
3154
3155 /*
3156 * Fallback function if there was no memory available and no objects on a
3157 * certain node and fallback is permitted. First we scan all the
3158 * available nodes for available objects. If that fails then we
3159 * perform an allocation without specifying a node. This allows the page
3160 * allocator to do its reclaim / fallback magic. We then insert the
3161 * slab into the proper nodelist and then allocate from it.
3162 */
3163 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3164 {
3165 struct zonelist *zonelist;
3166 struct zoneref *z;
3167 struct zone *zone;
3168 enum zone_type high_zoneidx = gfp_zone(flags);
3169 void *obj = NULL;
3170 struct page *page;
3171 int nid;
3172 unsigned int cpuset_mems_cookie;
3173
3174 if (flags & __GFP_THISNODE)
3175 return NULL;
3176
3177 retry_cpuset:
3178 cpuset_mems_cookie = read_mems_allowed_begin();
3179 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3180
3181 retry:
3182 /*
3183 * Look through allowed nodes for objects available
3184 * from existing per node queues.
3185 */
3186 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3187 nid = zone_to_nid(zone);
3188
3189 if (cpuset_zone_allowed(zone, flags) &&
3190 get_node(cache, nid) &&
3191 get_node(cache, nid)->free_objects) {
3192 obj = ____cache_alloc_node(cache,
3193 gfp_exact_node(flags), nid);
3194 if (obj)
3195 break;
3196 }
3197 }
3198
3199 if (!obj) {
3200 /*
3201 * This allocation will be performed within the constraints
3202 * of the current cpuset / memory policy requirements.
3203 * We may trigger various forms of reclaim on the allowed
3204 * set and go into memory reserves if necessary.
3205 */
3206 page = cache_grow_begin(cache, flags, numa_mem_id());
3207 cache_grow_end(cache, page);
3208 if (page) {
3209 nid = page_to_nid(page);
3210 obj = ____cache_alloc_node(cache,
3211 gfp_exact_node(flags), nid);
3212
3213 /*
3214 * Another processor may allocate the objects in
3215 * the slab since we are not holding any locks.
3216 */ 3217 if (!obj) 3218 goto retry; 3219 } 3220 } 3221 3222 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) 3223 goto retry_cpuset; 3224 return obj; 3225 } 3226 3227 /* 3228 * A interface to enable slab creation on nodeid 3229 */ 3230 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3231 int nodeid) 3232 { 3233 struct page *page; 3234 struct kmem_cache_node *n; 3235 void *obj = NULL; 3236 void *list = NULL; 3237 3238 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); 3239 n = get_node(cachep, nodeid); 3240 BUG_ON(!n); 3241 3242 check_irq_off(); 3243 spin_lock(&n->list_lock); 3244 page = get_first_slab(n, false); 3245 if (!page) 3246 goto must_grow; 3247 3248 check_spinlock_acquired_node(cachep, nodeid); 3249 3250 STATS_INC_NODEALLOCS(cachep); 3251 STATS_INC_ACTIVE(cachep); 3252 STATS_SET_HIGH(cachep); 3253 3254 BUG_ON(page->active == cachep->num); 3255 3256 obj = slab_get_obj(cachep, page); 3257 n->free_objects--; 3258 3259 fixup_slab_list(cachep, n, page, &list); 3260 3261 spin_unlock(&n->list_lock); 3262 fixup_objfreelist_debug(cachep, &list); 3263 return obj; 3264 3265 must_grow: 3266 spin_unlock(&n->list_lock); 3267 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); 3268 if (page) { 3269 /* This slab isn't counted yet so don't update free_objects */ 3270 obj = slab_get_obj(cachep, page); 3271 } 3272 cache_grow_end(cachep, page); 3273 3274 return obj ? obj : fallback_alloc(cachep, flags); 3275 } 3276 3277 static __always_inline void * 3278 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3279 unsigned long caller) 3280 { 3281 unsigned long save_flags; 3282 void *ptr; 3283 int slab_node = numa_mem_id(); 3284 3285 flags &= gfp_allowed_mask; 3286 cachep = slab_pre_alloc_hook(cachep, flags); 3287 if (unlikely(!cachep)) 3288 return NULL; 3289 3290 cache_alloc_debugcheck_before(cachep, flags); 3291 local_irq_save(save_flags); 3292 3293 if (nodeid == NUMA_NO_NODE) 3294 nodeid = slab_node; 3295 3296 if (unlikely(!get_node(cachep, nodeid))) { 3297 /* Node not bootstrapped yet */ 3298 ptr = fallback_alloc(cachep, flags); 3299 goto out; 3300 } 3301 3302 if (nodeid == slab_node) { 3303 /* 3304 * Use the locally cached objects if possible. 3305 * However ____cache_alloc does not allow fallback 3306 * to other nodes. It may fail while we still have 3307 * objects on other nodes available. 3308 */ 3309 ptr = ____cache_alloc(cachep, flags); 3310 if (ptr) 3311 goto out; 3312 } 3313 /* ___cache_alloc_node can fall back to other nodes */ 3314 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3315 out: 3316 local_irq_restore(save_flags); 3317 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3318 3319 if (unlikely(flags & __GFP_ZERO) && ptr) 3320 memset(ptr, 0, cachep->object_size); 3321 3322 slab_post_alloc_hook(cachep, flags, 1, &ptr); 3323 return ptr; 3324 } 3325 3326 static __always_inline void * 3327 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3328 { 3329 void *objp; 3330 3331 if (current->mempolicy || cpuset_do_slab_mem_spread()) { 3332 objp = alternate_node_alloc(cache, flags); 3333 if (objp) 3334 goto out; 3335 } 3336 objp = ____cache_alloc(cache, flags); 3337 3338 /* 3339 * We may just have run out of memory on the local node. 
3340 * ____cache_alloc_node() knows how to locate memory on other nodes 3341 */ 3342 if (!objp) 3343 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); 3344 3345 out: 3346 return objp; 3347 } 3348 #else 3349 3350 static __always_inline void * 3351 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3352 { 3353 return ____cache_alloc(cachep, flags); 3354 } 3355 3356 #endif /* CONFIG_NUMA */ 3357 3358 static __always_inline void * 3359 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3360 { 3361 unsigned long save_flags; 3362 void *objp; 3363 3364 flags &= gfp_allowed_mask; 3365 cachep = slab_pre_alloc_hook(cachep, flags); 3366 if (unlikely(!cachep)) 3367 return NULL; 3368 3369 cache_alloc_debugcheck_before(cachep, flags); 3370 local_irq_save(save_flags); 3371 objp = __do_cache_alloc(cachep, flags); 3372 local_irq_restore(save_flags); 3373 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3374 prefetchw(objp); 3375 3376 if (unlikely(flags & __GFP_ZERO) && objp) 3377 memset(objp, 0, cachep->object_size); 3378 3379 slab_post_alloc_hook(cachep, flags, 1, &objp); 3380 return objp; 3381 } 3382 3383 /* 3384 * Caller needs to acquire correct kmem_cache_node's list_lock 3385 * @list: List of detached free slabs should be freed by caller 3386 */ 3387 static void free_block(struct kmem_cache *cachep, void **objpp, 3388 int nr_objects, int node, struct list_head *list) 3389 { 3390 int i; 3391 struct kmem_cache_node *n = get_node(cachep, node); 3392 struct page *page; 3393 3394 n->free_objects += nr_objects; 3395 3396 for (i = 0; i < nr_objects; i++) { 3397 void *objp; 3398 struct page *page; 3399 3400 objp = objpp[i]; 3401 3402 page = virt_to_head_page(objp); 3403 list_del(&page->lru); 3404 check_spinlock_acquired_node(cachep, node); 3405 slab_put_obj(cachep, page, objp); 3406 STATS_DEC_ACTIVE(cachep); 3407 3408 /* fixup slab chains */ 3409 if (page->active == 0) { 3410 list_add(&page->lru, &n->slabs_free); 3411 n->free_slabs++; 3412 } else { 3413 /* Unconditionally move a slab to the end of the 3414 * partial list on free - maximum time for the 3415 * other objects to be freed, too. 
3416 */ 3417 list_add_tail(&page->lru, &n->slabs_partial); 3418 } 3419 } 3420 3421 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { 3422 n->free_objects -= cachep->num; 3423 3424 page = list_last_entry(&n->slabs_free, struct page, lru); 3425 list_move(&page->lru, list); 3426 n->free_slabs--; 3427 n->total_slabs--; 3428 } 3429 } 3430 3431 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3432 { 3433 int batchcount; 3434 struct kmem_cache_node *n; 3435 int node = numa_mem_id(); 3436 LIST_HEAD(list); 3437 3438 batchcount = ac->batchcount; 3439 3440 check_irq_off(); 3441 n = get_node(cachep, node); 3442 spin_lock(&n->list_lock); 3443 if (n->shared) { 3444 struct array_cache *shared_array = n->shared; 3445 int max = shared_array->limit - shared_array->avail; 3446 if (max) { 3447 if (batchcount > max) 3448 batchcount = max; 3449 memcpy(&(shared_array->entry[shared_array->avail]), 3450 ac->entry, sizeof(void *) * batchcount); 3451 shared_array->avail += batchcount; 3452 goto free_done; 3453 } 3454 } 3455 3456 free_block(cachep, ac->entry, batchcount, node, &list); 3457 free_done: 3458 #if STATS 3459 { 3460 int i = 0; 3461 struct page *page; 3462 3463 list_for_each_entry(page, &n->slabs_free, lru) { 3464 BUG_ON(page->active); 3465 3466 i++; 3467 } 3468 STATS_SET_FREEABLE(cachep, i); 3469 } 3470 #endif 3471 spin_unlock(&n->list_lock); 3472 slabs_destroy(cachep, &list); 3473 ac->avail -= batchcount; 3474 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3475 } 3476 3477 /* 3478 * Release an obj back to its cache. If the obj has a constructed state, it must 3479 * be in this state _before_ it is released. Called with disabled ints. 3480 */ 3481 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, 3482 unsigned long caller) 3483 { 3484 /* Put the object into the quarantine, don't touch it for now. */ 3485 if (kasan_slab_free(cachep, objp, _RET_IP_)) 3486 return; 3487 3488 ___cache_free(cachep, objp, caller); 3489 } 3490 3491 void ___cache_free(struct kmem_cache *cachep, void *objp, 3492 unsigned long caller) 3493 { 3494 struct array_cache *ac = cpu_cache_get(cachep); 3495 3496 check_irq_off(); 3497 kmemleak_free_recursive(objp, cachep->flags); 3498 objp = cache_free_debugcheck(cachep, objp, caller); 3499 3500 /* 3501 * Skip calling cache_free_alien() when the platform is not numa. 3502 * This will avoid cache misses that happen while accessing slabp (which 3503 * is per page memory reference) to get nodeid. Instead use a global 3504 * variable to skip the call, which is mostly likely to be present in 3505 * the cache. 3506 */ 3507 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) 3508 return; 3509 3510 if (ac->avail < ac->limit) { 3511 STATS_INC_FREEHIT(cachep); 3512 } else { 3513 STATS_INC_FREEMISS(cachep); 3514 cache_flusharray(cachep, ac); 3515 } 3516 3517 if (sk_memalloc_socks()) { 3518 struct page *page = virt_to_head_page(objp); 3519 3520 if (unlikely(PageSlabPfmemalloc(page))) { 3521 cache_free_pfmemalloc(cachep, page, objp); 3522 return; 3523 } 3524 } 3525 3526 ac->entry[ac->avail++] = objp; 3527 } 3528 3529 /** 3530 * kmem_cache_alloc - Allocate an object 3531 * @cachep: The cache to allocate from. 3532 * @flags: See kmalloc(). 3533 * 3534 * Allocate an object from this cache. The flags are only relevant 3535 * if the cache has no available objects. 
3536 */ 3537 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3538 { 3539 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3540 3541 ret = kasan_slab_alloc(cachep, ret, flags); 3542 trace_kmem_cache_alloc(_RET_IP_, ret, 3543 cachep->object_size, cachep->size, flags); 3544 3545 return ret; 3546 } 3547 EXPORT_SYMBOL(kmem_cache_alloc); 3548 3549 static __always_inline void 3550 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, 3551 size_t size, void **p, unsigned long caller) 3552 { 3553 size_t i; 3554 3555 for (i = 0; i < size; i++) 3556 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); 3557 } 3558 3559 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3560 void **p) 3561 { 3562 size_t i; 3563 3564 s = slab_pre_alloc_hook(s, flags); 3565 if (!s) 3566 return 0; 3567 3568 cache_alloc_debugcheck_before(s, flags); 3569 3570 local_irq_disable(); 3571 for (i = 0; i < size; i++) { 3572 void *objp = __do_cache_alloc(s, flags); 3573 3574 if (unlikely(!objp)) 3575 goto error; 3576 p[i] = objp; 3577 } 3578 local_irq_enable(); 3579 3580 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); 3581 3582 /* Clear memory outside IRQ disabled section */ 3583 if (unlikely(flags & __GFP_ZERO)) 3584 for (i = 0; i < size; i++) 3585 memset(p[i], 0, s->object_size); 3586 3587 slab_post_alloc_hook(s, flags, size, p); 3588 /* FIXME: Trace call missing. Christoph would like a bulk variant */ 3589 return size; 3590 error: 3591 local_irq_enable(); 3592 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); 3593 slab_post_alloc_hook(s, flags, i, p); 3594 __kmem_cache_free_bulk(s, i, p); 3595 return 0; 3596 } 3597 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3598 3599 #ifdef CONFIG_TRACING 3600 void * 3601 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3602 { 3603 void *ret; 3604 3605 ret = slab_alloc(cachep, flags, _RET_IP_); 3606 3607 ret = kasan_kmalloc(cachep, ret, size, flags); 3608 trace_kmalloc(_RET_IP_, ret, 3609 size, cachep->size, flags); 3610 return ret; 3611 } 3612 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3613 #endif 3614 3615 #ifdef CONFIG_NUMA 3616 /** 3617 * kmem_cache_alloc_node - Allocate an object on the specified node 3618 * @cachep: The cache to allocate from. 3619 * @flags: See kmalloc(). 3620 * @nodeid: node number of the target node. 3621 * 3622 * Identical to kmem_cache_alloc but it will allocate memory on the given 3623 * node, which can improve the performance for cpu bound structures. 3624 * 3625 * Fallback to other node is possible if __GFP_THISNODE is not set. 
3626 */ 3627 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3628 { 3629 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3630 3631 ret = kasan_slab_alloc(cachep, ret, flags); 3632 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3633 cachep->object_size, cachep->size, 3634 flags, nodeid); 3635 3636 return ret; 3637 } 3638 EXPORT_SYMBOL(kmem_cache_alloc_node); 3639 3640 #ifdef CONFIG_TRACING 3641 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3642 gfp_t flags, 3643 int nodeid, 3644 size_t size) 3645 { 3646 void *ret; 3647 3648 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3649 3650 ret = kasan_kmalloc(cachep, ret, size, flags); 3651 trace_kmalloc_node(_RET_IP_, ret, 3652 size, cachep->size, 3653 flags, nodeid); 3654 return ret; 3655 } 3656 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3657 #endif 3658 3659 static __always_inline void * 3660 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3661 { 3662 struct kmem_cache *cachep; 3663 void *ret; 3664 3665 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 3666 return NULL; 3667 cachep = kmalloc_slab(size, flags); 3668 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3669 return cachep; 3670 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); 3671 ret = kasan_kmalloc(cachep, ret, size, flags); 3672 3673 return ret; 3674 } 3675 3676 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3677 { 3678 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3679 } 3680 EXPORT_SYMBOL(__kmalloc_node); 3681 3682 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3683 int node, unsigned long caller) 3684 { 3685 return __do_kmalloc_node(size, flags, node, caller); 3686 } 3687 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3688 #endif /* CONFIG_NUMA */ 3689 3690 /** 3691 * __do_kmalloc - allocate memory 3692 * @size: how many bytes of memory are required. 3693 * @flags: the type of memory to allocate (see kmalloc). 3694 * @caller: function caller for debug tracking of the caller 3695 */ 3696 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3697 unsigned long caller) 3698 { 3699 struct kmem_cache *cachep; 3700 void *ret; 3701 3702 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 3703 return NULL; 3704 cachep = kmalloc_slab(size, flags); 3705 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3706 return cachep; 3707 ret = slab_alloc(cachep, flags, caller); 3708 3709 ret = kasan_kmalloc(cachep, ret, size, flags); 3710 trace_kmalloc(caller, ret, 3711 size, cachep->size, flags); 3712 3713 return ret; 3714 } 3715 3716 void *__kmalloc(size_t size, gfp_t flags) 3717 { 3718 return __do_kmalloc(size, flags, _RET_IP_); 3719 } 3720 EXPORT_SYMBOL(__kmalloc); 3721 3722 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3723 { 3724 return __do_kmalloc(size, flags, caller); 3725 } 3726 EXPORT_SYMBOL(__kmalloc_track_caller); 3727 3728 /** 3729 * kmem_cache_free - Deallocate an object 3730 * @cachep: The cache the allocation was from. 3731 * @objp: The previously allocated object. 3732 * 3733 * Free an object which was previously allocated from this 3734 * cache. 
3735 */ 3736 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3737 { 3738 unsigned long flags; 3739 cachep = cache_from_obj(cachep, objp); 3740 if (!cachep) 3741 return; 3742 3743 local_irq_save(flags); 3744 debug_check_no_locks_freed(objp, cachep->object_size); 3745 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3746 debug_check_no_obj_freed(objp, cachep->object_size); 3747 __cache_free(cachep, objp, _RET_IP_); 3748 local_irq_restore(flags); 3749 3750 trace_kmem_cache_free(_RET_IP_, objp); 3751 } 3752 EXPORT_SYMBOL(kmem_cache_free); 3753 3754 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) 3755 { 3756 struct kmem_cache *s; 3757 size_t i; 3758 3759 local_irq_disable(); 3760 for (i = 0; i < size; i++) { 3761 void *objp = p[i]; 3762 3763 if (!orig_s) /* called via kfree_bulk */ 3764 s = virt_to_cache(objp); 3765 else 3766 s = cache_from_obj(orig_s, objp); 3767 3768 debug_check_no_locks_freed(objp, s->object_size); 3769 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 3770 debug_check_no_obj_freed(objp, s->object_size); 3771 3772 __cache_free(s, objp, _RET_IP_); 3773 } 3774 local_irq_enable(); 3775 3776 /* FIXME: add tracing */ 3777 } 3778 EXPORT_SYMBOL(kmem_cache_free_bulk); 3779 3780 /** 3781 * kfree - free previously allocated memory 3782 * @objp: pointer returned by kmalloc. 3783 * 3784 * If @objp is NULL, no operation is performed. 3785 * 3786 * Don't free memory not originally allocated by kmalloc() 3787 * or you will run into trouble. 3788 */ 3789 void kfree(const void *objp) 3790 { 3791 struct kmem_cache *c; 3792 unsigned long flags; 3793 3794 trace_kfree(_RET_IP_, objp); 3795 3796 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3797 return; 3798 local_irq_save(flags); 3799 kfree_debugcheck(objp); 3800 c = virt_to_cache(objp); 3801 debug_check_no_locks_freed(objp, c->object_size); 3802 3803 debug_check_no_obj_freed(objp, c->object_size); 3804 __cache_free(c, (void *)objp, _RET_IP_); 3805 local_irq_restore(flags); 3806 } 3807 EXPORT_SYMBOL(kfree); 3808 3809 /* 3810 * This initializes kmem_cache_node or resizes various caches for all nodes. 3811 */ 3812 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) 3813 { 3814 int ret; 3815 int node; 3816 struct kmem_cache_node *n; 3817 3818 for_each_online_node(node) { 3819 ret = setup_kmem_cache_node(cachep, node, gfp, true); 3820 if (ret) 3821 goto fail; 3822 3823 } 3824 3825 return 0; 3826 3827 fail: 3828 if (!cachep->list.next) { 3829 /* Cache is not active yet. Roll back what we did */ 3830 node--; 3831 while (node >= 0) { 3832 n = get_node(cachep, node); 3833 if (n) { 3834 kfree(n->shared); 3835 free_alien_cache(n->alien); 3836 kfree(n); 3837 cachep->node[node] = NULL; 3838 } 3839 node--; 3840 } 3841 } 3842 return -ENOMEM; 3843 } 3844 3845 /* Always called with the slab_mutex held */ 3846 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, 3847 int batchcount, int shared, gfp_t gfp) 3848 { 3849 struct array_cache __percpu *cpu_cache, *prev; 3850 int cpu; 3851 3852 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); 3853 if (!cpu_cache) 3854 return -ENOMEM; 3855 3856 prev = cachep->cpu_cache; 3857 cachep->cpu_cache = cpu_cache; 3858 /* 3859 * Without a previous cpu_cache there's no need to synchronize remote 3860 * cpus, so skip the IPIs. 
3861 */ 3862 if (prev) 3863 kick_all_cpus_sync(); 3864 3865 check_irq_on(); 3866 cachep->batchcount = batchcount; 3867 cachep->limit = limit; 3868 cachep->shared = shared; 3869 3870 if (!prev) 3871 goto setup_node; 3872 3873 for_each_online_cpu(cpu) { 3874 LIST_HEAD(list); 3875 int node; 3876 struct kmem_cache_node *n; 3877 struct array_cache *ac = per_cpu_ptr(prev, cpu); 3878 3879 node = cpu_to_mem(cpu); 3880 n = get_node(cachep, node); 3881 spin_lock_irq(&n->list_lock); 3882 free_block(cachep, ac->entry, ac->avail, node, &list); 3883 spin_unlock_irq(&n->list_lock); 3884 slabs_destroy(cachep, &list); 3885 } 3886 free_percpu(prev); 3887 3888 setup_node: 3889 return setup_kmem_cache_nodes(cachep, gfp); 3890 } 3891 3892 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3893 int batchcount, int shared, gfp_t gfp) 3894 { 3895 int ret; 3896 struct kmem_cache *c; 3897 3898 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3899 3900 if (slab_state < FULL) 3901 return ret; 3902 3903 if ((ret < 0) || !is_root_cache(cachep)) 3904 return ret; 3905 3906 lockdep_assert_held(&slab_mutex); 3907 for_each_memcg_cache(c, cachep) { 3908 /* return value determined by the root cache only */ 3909 __do_tune_cpucache(c, limit, batchcount, shared, gfp); 3910 } 3911 3912 return ret; 3913 } 3914 3915 /* Called with slab_mutex held always */ 3916 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3917 { 3918 int err; 3919 int limit = 0; 3920 int shared = 0; 3921 int batchcount = 0; 3922 3923 err = cache_random_seq_create(cachep, cachep->num, gfp); 3924 if (err) 3925 goto end; 3926 3927 if (!is_root_cache(cachep)) { 3928 struct kmem_cache *root = memcg_root_cache(cachep); 3929 limit = root->limit; 3930 shared = root->shared; 3931 batchcount = root->batchcount; 3932 } 3933 3934 if (limit && shared && batchcount) 3935 goto skip_setup; 3936 /* 3937 * The head array serves three purposes: 3938 * - create a LIFO ordering, i.e. return objects that are cache-warm 3939 * - reduce the number of spinlock operations. 3940 * - reduce the number of linked list operations on the slab and 3941 * bufctl chains: array operations are cheaper. 3942 * The numbers are guessed, we should auto-tune as described by 3943 * Bonwick. 3944 */ 3945 if (cachep->size > 131072) 3946 limit = 1; 3947 else if (cachep->size > PAGE_SIZE) 3948 limit = 8; 3949 else if (cachep->size > 1024) 3950 limit = 24; 3951 else if (cachep->size > 256) 3952 limit = 54; 3953 else 3954 limit = 120; 3955 3956 /* 3957 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3958 * allocation behaviour: Most allocs on one cpu, most free operations 3959 * on another cpu. For these cases, an efficient object passing between 3960 * cpus is necessary. This is provided by a shared array. The array 3961 * replaces Bonwick's magazine layer. 3962 * On uniprocessor, it's functionally equivalent (but less efficient) 3963 * to a larger limit. Thus disabled by default. 3964 */ 3965 shared = 0; 3966 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) 3967 shared = 8; 3968 3969 #if DEBUG 3970 /* 3971 * With debugging enabled, large batchcount lead to excessively long 3972 * periods with disabled local interrupts. 
Limit the batchcount 3973 */ 3974 if (limit > 32) 3975 limit = 32; 3976 #endif 3977 batchcount = (limit + 1) / 2; 3978 skip_setup: 3979 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3980 end: 3981 if (err) 3982 pr_err("enable_cpucache failed for %s, error %d\n", 3983 cachep->name, -err); 3984 return err; 3985 } 3986 3987 /* 3988 * Drain an array if it contains any elements taking the node lock only if 3989 * necessary. Note that the node listlock also protects the array_cache 3990 * if drain_array() is used on the shared array. 3991 */ 3992 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 3993 struct array_cache *ac, int node) 3994 { 3995 LIST_HEAD(list); 3996 3997 /* ac from n->shared can be freed if we don't hold the slab_mutex. */ 3998 check_mutex_acquired(); 3999 4000 if (!ac || !ac->avail) 4001 return; 4002 4003 if (ac->touched) { 4004 ac->touched = 0; 4005 return; 4006 } 4007 4008 spin_lock_irq(&n->list_lock); 4009 drain_array_locked(cachep, ac, node, false, &list); 4010 spin_unlock_irq(&n->list_lock); 4011 4012 slabs_destroy(cachep, &list); 4013 } 4014 4015 /** 4016 * cache_reap - Reclaim memory from caches. 4017 * @w: work descriptor 4018 * 4019 * Called from workqueue/eventd every few seconds. 4020 * Purpose: 4021 * - clear the per-cpu caches for this CPU. 4022 * - return freeable pages to the main free memory pool. 4023 * 4024 * If we cannot acquire the cache chain mutex then just give up - we'll try 4025 * again on the next iteration. 4026 */ 4027 static void cache_reap(struct work_struct *w) 4028 { 4029 struct kmem_cache *searchp; 4030 struct kmem_cache_node *n; 4031 int node = numa_mem_id(); 4032 struct delayed_work *work = to_delayed_work(w); 4033 4034 if (!mutex_trylock(&slab_mutex)) 4035 /* Give up. Setup the next iteration. */ 4036 goto out; 4037 4038 list_for_each_entry(searchp, &slab_caches, list) { 4039 check_irq_on(); 4040 4041 /* 4042 * We only take the node lock if absolutely necessary and we 4043 * have established with reasonable certainty that 4044 * we can do some work if the lock was obtained. 4045 */ 4046 n = get_node(searchp, node); 4047 4048 reap_alien(searchp, n); 4049 4050 drain_array(searchp, n, cpu_cache_get(searchp), node); 4051 4052 /* 4053 * These are racy checks but it does not matter 4054 * if we skip one check or scan twice. 
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{	/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

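/*
 * Worked example for get_slabinfo() (made-up numbers, for illustration only):
 * with total_slabs = 10, free_slabs = 2, free_objects = 23 and
 * cachep->num = 8 objects per slab, the derived values are
 * num_objs = 10 * 8 = 80, active_slabs = 10 - 2 = 8 and
 * active_objs = 80 - 23 = 57.  These feed the <active_objs> <num_objs> ...
 * and "slabdata <active_slabs> <num_slabs> <sharedavail>" columns of
 * /proc/slabinfo.
 */
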
#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}

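/*
 * Example of the write format parsed above (illustrative cache name and
 * values): the line is "<cache name> <limit> <batchcount> <shared>", e.g.
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * asks do_tune_cpucache() to give the dentry cache a per-cpu limit of 120,
 * a batchcount of 60 and a shared-array factor of 8.  Tunables that fail the
 * sanity checks above are silently accepted (res = 0) without changing
 * anything.
 */
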
#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
			struct page *page)
{
	void *p;
	int i, j;
	unsigned long v;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		bool active = true;

		for (j = page->active; j < c->num; j++) {
			if (get_free_obj(page, j) == i) {
				active = false;
				break;
			}
		}

		if (!active)
			continue;

		/*
		 * probe_kernel_read() is used for DEBUG_PAGEALLOC: the page
		 * table mapping is only established at actual object
		 * allocation, so we could otherwise mistakenly access an
		 * unmapped object in the cpu cache.
		 */
		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
			continue;

		if (!add_caller(n, v))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%px", (void *)address);
}

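/*
 * Layout of the scratch table shared by add_caller(), handle_slab() and
 * leaks_show() (summary added for clarity, derived from the surrounding
 * code): n[0] is the table size limit (add_caller() gives up once n[1]
 * would reach it), n[1] is the number of (caller address, hit count) pairs
 * currently stored, and n[2..] holds those pairs, kept sorted by address so
 * add_caller() can binary-search them.  slabstats_open() below sizes the
 * initial buffer so that n[0] = PAGE_SIZE / (2 * sizeof(unsigned long)).
 */
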
static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/*
	 * Set store_user_clean and start to grab stored user information
	 * for all objects on this cache. If alloc/free requests come in
	 * during the processing, the information would be wrong, so restart
	 * the whole scan.
	 */
	do {
		set_store_user_clean(cachep);
		drain_cpu_caches(cachep);

		x[1] = 0;

		for_each_kmem_cache_node(cachep, node, n) {

			check_irq_on();
			spin_lock_irq(&n->list_lock);

			list_for_each_entry(page, &n->slabs_full, lru)
				handle_slab(x, cachep, page);
			list_for_each_entry(page, &n->slabs_partial, lru)
				handle_slab(x, cachep, page);
			spin_unlock_irq(&n->list_lock);
		}
	} while (!is_store_user_clean(cachep));

	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
				     GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Returns normally if the check passes; otherwise the violation is reported
 * via usercopy_warn() (when usercopy_fallback is set and the copy still lies
 * within the allocated object) or usercopy_abort().
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed for the duration of the call.
 */
size_t ksize(const void *objp)
{
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	size = virt_to_cache(objp)->object_size;
	/* We assume that ksize callers could use the whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_shadow(objp, size);

	return size;
}
EXPORT_SYMBOL(ksize);

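/*
 * Illustrative use of ksize() (example only, not part of the original file):
 * a caller may use the slack space that kmalloc() rounded the request up to:
 *
 *	buf = kmalloc(17, GFP_KERNEL);
 *	if (buf)
 *		avail = ksize(buf);
 *
 * On a typical configuration the 17-byte request is served from the
 * kmalloc-32 cache, so avail is 32 and all 32 bytes may be used; the exact
 * cache size depends on the architecture's minimum kmalloc alignment.
 */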