// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
81 * Shobhit Dayal <shobhit@calsoftinc.com> 82 * Alok N Kataria <alokk@calsoftinc.com> 83 * Christoph Lameter <christoph@lameter.com> 84 * 85 * Modified the slab allocator to be node aware on NUMA systems. 86 * Each node has its own list of partial, free and full slabs. 87 * All object allocations for a node occur from node specific slab lists. 88 */ 89 90 #include <linux/slab.h> 91 #include <linux/mm.h> 92 #include <linux/poison.h> 93 #include <linux/swap.h> 94 #include <linux/cache.h> 95 #include <linux/interrupt.h> 96 #include <linux/init.h> 97 #include <linux/compiler.h> 98 #include <linux/cpuset.h> 99 #include <linux/proc_fs.h> 100 #include <linux/seq_file.h> 101 #include <linux/notifier.h> 102 #include <linux/kallsyms.h> 103 #include <linux/cpu.h> 104 #include <linux/sysctl.h> 105 #include <linux/module.h> 106 #include <linux/rcupdate.h> 107 #include <linux/string.h> 108 #include <linux/uaccess.h> 109 #include <linux/nodemask.h> 110 #include <linux/kmemleak.h> 111 #include <linux/mempolicy.h> 112 #include <linux/mutex.h> 113 #include <linux/fault-inject.h> 114 #include <linux/rtmutex.h> 115 #include <linux/reciprocal_div.h> 116 #include <linux/debugobjects.h> 117 #include <linux/memory.h> 118 #include <linux/prefetch.h> 119 #include <linux/sched/task_stack.h> 120 121 #include <net/sock.h> 122 123 #include <asm/cacheflush.h> 124 #include <asm/tlbflush.h> 125 #include <asm/page.h> 126 127 #include <trace/events/kmem.h> 128 129 #include "internal.h" 130 131 #include "slab.h" 132 133 /* 134 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 135 * 0 for faster, smaller code (especially in the critical paths). 136 * 137 * STATS - 1 to collect stats for /proc/slabinfo. 138 * 0 for faster, smaller code (especially in the critical paths). 139 * 140 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 141 */ 142 143 #ifdef CONFIG_DEBUG_SLAB 144 #define DEBUG 1 145 #define STATS 1 146 #define FORCED_DEBUG 1 147 #else 148 #define DEBUG 0 149 #define STATS 0 150 #define FORCED_DEBUG 0 151 #endif 152 153 /* Shouldn't this be in a header file somewhere? */ 154 #define BYTES_PER_WORD sizeof(void *) 155 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) 156 157 #ifndef ARCH_KMALLOC_FLAGS 158 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 159 #endif 160 161 #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ 162 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) 163 164 #if FREELIST_BYTE_INDEX 165 typedef unsigned char freelist_idx_t; 166 #else 167 typedef unsigned short freelist_idx_t; 168 #endif 169 170 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) 171 172 /* 173 * struct array_cache 174 * 175 * Purpose: 176 * - LIFO ordering, to hand out cache-warm objects from _alloc 177 * - reduce the number of linked list operations 178 * - reduce spinlock operations 179 * 180 * The limit is stored in the per-cpu structure to reduce the data cache 181 * footprint. 182 * 183 */ 184 struct array_cache { 185 unsigned int avail; 186 unsigned int limit; 187 unsigned int batchcount; 188 unsigned int touched; 189 void *entry[]; /* 190 * Must have this definition in here for the proper 191 * alignment of array_cache. Also simplifies accessing 192 * the entries. 193 */ 194 }; 195 196 struct alien_cache { 197 spinlock_t lock; 198 struct array_cache ac; 199 }; 200 201 /* 202 * Need this for bootstrapping a per node allocator. 
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
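 *
 * The timeouts below resolve that tradeoff: cache_reap() looks at each
 * CPU's array_cache roughly every 2 seconds (REAPTIMEOUT_AC) and at the
 * per-node lists roughly every 4 seconds (REAPTIMEOUT_NODE).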
266 */ 267 #define REAPTIMEOUT_AC (2*HZ) 268 #define REAPTIMEOUT_NODE (4*HZ) 269 270 #if STATS 271 #define STATS_INC_ACTIVE(x) ((x)->num_active++) 272 #define STATS_DEC_ACTIVE(x) ((x)->num_active--) 273 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 274 #define STATS_INC_GROWN(x) ((x)->grown++) 275 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 276 #define STATS_SET_HIGH(x) \ 277 do { \ 278 if ((x)->num_active > (x)->high_mark) \ 279 (x)->high_mark = (x)->num_active; \ 280 } while (0) 281 #define STATS_INC_ERR(x) ((x)->errors++) 282 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 283 #define STATS_INC_NODEFREES(x) ((x)->node_frees++) 284 #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 285 #define STATS_SET_FREEABLE(x, i) \ 286 do { \ 287 if ((x)->max_freeable < i) \ 288 (x)->max_freeable = i; \ 289 } while (0) 290 #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 291 #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 292 #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 293 #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 294 #else 295 #define STATS_INC_ACTIVE(x) do { } while (0) 296 #define STATS_DEC_ACTIVE(x) do { } while (0) 297 #define STATS_INC_ALLOCED(x) do { } while (0) 298 #define STATS_INC_GROWN(x) do { } while (0) 299 #define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0) 300 #define STATS_SET_HIGH(x) do { } while (0) 301 #define STATS_INC_ERR(x) do { } while (0) 302 #define STATS_INC_NODEALLOCS(x) do { } while (0) 303 #define STATS_INC_NODEFREES(x) do { } while (0) 304 #define STATS_INC_ACOVERFLOW(x) do { } while (0) 305 #define STATS_SET_FREEABLE(x, i) do { } while (0) 306 #define STATS_INC_ALLOCHIT(x) do { } while (0) 307 #define STATS_INC_ALLOCMISS(x) do { } while (0) 308 #define STATS_INC_FREEHIT(x) do { } while (0) 309 #define STATS_INC_FREEMISS(x) do { } while (0) 310 #endif 311 312 #if DEBUG 313 314 /* 315 * memory layout of objects: 316 * 0 : objp 317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 318 * the end of an object is aligned with the end of the real 319 * allocation. Catches writes behind the end of the allocation. 320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 321 * redzone word. 322 * cachep->obj_offset: The real object. 
323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 324 * cachep->size - 1* BYTES_PER_WORD: last caller address 325 * [BYTES_PER_WORD long] 326 */ 327 static int obj_offset(struct kmem_cache *cachep) 328 { 329 return cachep->obj_offset; 330 } 331 332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 333 { 334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 335 return (unsigned long long*) (objp + obj_offset(cachep) - 336 sizeof(unsigned long long)); 337 } 338 339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 340 { 341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 342 if (cachep->flags & SLAB_STORE_USER) 343 return (unsigned long long *)(objp + cachep->size - 344 sizeof(unsigned long long) - 345 REDZONE_ALIGN); 346 return (unsigned long long *) (objp + cachep->size - 347 sizeof(unsigned long long)); 348 } 349 350 static void **dbg_userword(struct kmem_cache *cachep, void *objp) 351 { 352 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 353 return (void **)(objp + cachep->size - BYTES_PER_WORD); 354 } 355 356 #else 357 358 #define obj_offset(x) 0 359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 362 363 #endif 364 365 /* 366 * Do not go above this order unless 0 objects fit into the slab or 367 * overridden on the command line. 368 */ 369 #define SLAB_MAX_ORDER_HI 1 370 #define SLAB_MAX_ORDER_LO 0 371 static int slab_max_order = SLAB_MAX_ORDER_LO; 372 static bool slab_max_order_set __initdata; 373 374 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, 375 unsigned int idx) 376 { 377 return page->s_mem + cache->size * idx; 378 } 379 380 #define BOOT_CPUCACHE_ENTRIES 1 381 /* internal cache of cache description objs */ 382 static struct kmem_cache kmem_cache_boot = { 383 .batchcount = 1, 384 .limit = BOOT_CPUCACHE_ENTRIES, 385 .shared = 1, 386 .size = sizeof(struct kmem_cache), 387 .name = "kmem_cache", 388 }; 389 390 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); 391 392 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 393 { 394 return this_cpu_ptr(cachep->cpu_cache); 395 } 396 397 /* 398 * Calculate the number of objects and left-over bytes for a given buffer size. 399 */ 400 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, 401 slab_flags_t flags, size_t *left_over) 402 { 403 unsigned int num; 404 size_t slab_size = PAGE_SIZE << gfporder; 405 406 /* 407 * The slab management structure can be either off the slab or 408 * on it. For the latter case, the memory allocated for a 409 * slab is used for: 410 * 411 * - @buffer_size bytes for each object 412 * - One freelist_idx_t for each object 413 * 414 * We don't need to consider alignment of freelist because 415 * freelist will be at the end of slab page. The objects will be 416 * at the correct alignment. 417 * 418 * If the slab management structure is off the slab, then the 419 * alignment will already be calculated into the size. Because 420 * the slabs are all pages aligned, the objects will be at the 421 * correct alignment when allocated. 
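 *
 * A worked example with illustrative numbers (assuming a 4096-byte page
 * and a one-byte freelist_idx_t, neither of which is guaranteed here):
 * for 128-byte objects managed on-slab, each object costs 128 + 1 = 129
 * bytes, so num = 4096 / 129 = 31 objects and *left_over = 4096 - 31 * 129
 * = 97 bytes.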
422 */ 423 if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) { 424 num = slab_size / buffer_size; 425 *left_over = slab_size % buffer_size; 426 } else { 427 num = slab_size / (buffer_size + sizeof(freelist_idx_t)); 428 *left_over = slab_size % 429 (buffer_size + sizeof(freelist_idx_t)); 430 } 431 432 return num; 433 } 434 435 #if DEBUG 436 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) 437 438 static void __slab_error(const char *function, struct kmem_cache *cachep, 439 char *msg) 440 { 441 pr_err("slab error in %s(): cache `%s': %s\n", 442 function, cachep->name, msg); 443 dump_stack(); 444 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 445 } 446 #endif 447 448 /* 449 * By default on NUMA we use alien caches to stage the freeing of 450 * objects allocated from other nodes. This causes massive memory 451 * inefficiencies when using fake NUMA setup to split memory into a 452 * large number of small nodes, so it can be disabled on the command 453 * line 454 */ 455 456 static int use_alien_caches __read_mostly = 1; 457 static int __init noaliencache_setup(char *s) 458 { 459 use_alien_caches = 0; 460 return 1; 461 } 462 __setup("noaliencache", noaliencache_setup); 463 464 static int __init slab_max_order_setup(char *str) 465 { 466 get_option(&str, &slab_max_order); 467 slab_max_order = slab_max_order < 0 ? 0 : 468 min(slab_max_order, MAX_ORDER - 1); 469 slab_max_order_set = true; 470 471 return 1; 472 } 473 __setup("slab_max_order=", slab_max_order_setup); 474 475 #ifdef CONFIG_NUMA 476 /* 477 * Special reaping functions for NUMA systems called from cache_reap(). 478 * These take care of doing round robin flushing of alien caches (containing 479 * objects freed on different nodes from which they were allocated) and the 480 * flushing of remote pcps by calling drain_node_pages. 481 */ 482 static DEFINE_PER_CPU(unsigned long, slab_reap_node); 483 484 static void init_reap_node(int cpu) 485 { 486 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu), 487 node_online_map); 488 } 489 490 static void next_reap_node(void) 491 { 492 int node = __this_cpu_read(slab_reap_node); 493 494 node = next_node_in(node, node_online_map); 495 __this_cpu_write(slab_reap_node, node); 496 } 497 498 #else 499 #define init_reap_node(cpu) do { } while (0) 500 #define next_reap_node(void) do { } while (0) 501 #endif 502 503 /* 504 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 505 * via the workqueue/eventd. 506 * Add the CPU number into the expiration time to minimize the possibility of 507 * the CPUs getting into lockstep and contending for the global cache chain 508 * lock. 509 */ 510 static void start_cpu_timer(int cpu) 511 { 512 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); 513 514 if (reap_work->work.func == NULL) { 515 init_reap_node(cpu); 516 INIT_DEFERRABLE_WORK(reap_work, cache_reap); 517 schedule_delayed_work_on(cpu, reap_work, 518 __round_jiffies_relative(HZ, cpu)); 519 } 520 } 521 522 static void init_arraycache(struct array_cache *ac, int limit, int batch) 523 { 524 if (ac) { 525 ac->avail = 0; 526 ac->limit = limit; 527 ac->batchcount = batch; 528 ac->touched = 0; 529 } 530 } 531 532 static struct array_cache *alloc_arraycache(int node, int entries, 533 int batchcount, gfp_t gfp) 534 { 535 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); 536 struct array_cache *ac = NULL; 537 538 ac = kmalloc_node(memsize, gfp, node); 539 /* 540 * The array_cache structures contain pointers to free object. 
541 * However, when such objects are allocated or transferred to another 542 * cache the pointers are not cleared and they could be counted as 543 * valid references during a kmemleak scan. Therefore, kmemleak must 544 * not scan such objects. 545 */ 546 kmemleak_no_scan(ac); 547 init_arraycache(ac, entries, batchcount); 548 return ac; 549 } 550 551 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, 552 struct page *page, void *objp) 553 { 554 struct kmem_cache_node *n; 555 int page_node; 556 LIST_HEAD(list); 557 558 page_node = page_to_nid(page); 559 n = get_node(cachep, page_node); 560 561 spin_lock(&n->list_lock); 562 free_block(cachep, &objp, 1, page_node, &list); 563 spin_unlock(&n->list_lock); 564 565 slabs_destroy(cachep, &list); 566 } 567 568 /* 569 * Transfer objects in one arraycache to another. 570 * Locking must be handled by the caller. 571 * 572 * Return the number of entries transferred. 573 */ 574 static int transfer_objects(struct array_cache *to, 575 struct array_cache *from, unsigned int max) 576 { 577 /* Figure out how many entries to transfer */ 578 int nr = min3(from->avail, max, to->limit - to->avail); 579 580 if (!nr) 581 return 0; 582 583 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 584 sizeof(void *) *nr); 585 586 from->avail -= nr; 587 to->avail += nr; 588 return nr; 589 } 590 591 /* &alien->lock must be held by alien callers. */ 592 static __always_inline void __free_one(struct array_cache *ac, void *objp) 593 { 594 /* Avoid trivial double-free. */ 595 if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 596 WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp)) 597 return; 598 ac->entry[ac->avail++] = objp; 599 } 600 601 #ifndef CONFIG_NUMA 602 603 #define drain_alien_cache(cachep, alien) do { } while (0) 604 #define reap_alien(cachep, n) do { } while (0) 605 606 static inline struct alien_cache **alloc_alien_cache(int node, 607 int limit, gfp_t gfp) 608 { 609 return NULL; 610 } 611 612 static inline void free_alien_cache(struct alien_cache **ac_ptr) 613 { 614 } 615 616 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 617 { 618 return 0; 619 } 620 621 static inline void *alternate_node_alloc(struct kmem_cache *cachep, 622 gfp_t flags) 623 { 624 return NULL; 625 } 626 627 static inline void *____cache_alloc_node(struct kmem_cache *cachep, 628 gfp_t flags, int nodeid) 629 { 630 return NULL; 631 } 632 633 static inline gfp_t gfp_exact_node(gfp_t flags) 634 { 635 return flags & ~__GFP_NOFAIL; 636 } 637 638 #else /* CONFIG_NUMA */ 639 640 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 641 static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 642 643 static struct alien_cache *__alloc_alien_cache(int node, int entries, 644 int batch, gfp_t gfp) 645 { 646 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); 647 struct alien_cache *alc = NULL; 648 649 alc = kmalloc_node(memsize, gfp, node); 650 if (alc) { 651 kmemleak_no_scan(alc); 652 init_arraycache(&alc->ac, entries, batch); 653 spin_lock_init(&alc->lock); 654 } 655 return alc; 656 } 657 658 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) 659 { 660 struct alien_cache **alc_ptr; 661 int i; 662 663 if (limit > 1) 664 limit = 12; 665 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node); 666 if (!alc_ptr) 667 return NULL; 668 669 for_each_node(i) { 670 if (i == node || !node_online(i)) 671 continue; 672 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); 
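		/*
		 * The 0xbaadf00d above is a placeholder batchcount: alien
		 * array_caches are drained in full (see __drain_alien_cache())
		 * rather than in batches, so the value is never used as a
		 * real batch size.
		 */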
673 if (!alc_ptr[i]) { 674 for (i--; i >= 0; i--) 675 kfree(alc_ptr[i]); 676 kfree(alc_ptr); 677 return NULL; 678 } 679 } 680 return alc_ptr; 681 } 682 683 static void free_alien_cache(struct alien_cache **alc_ptr) 684 { 685 int i; 686 687 if (!alc_ptr) 688 return; 689 for_each_node(i) 690 kfree(alc_ptr[i]); 691 kfree(alc_ptr); 692 } 693 694 static void __drain_alien_cache(struct kmem_cache *cachep, 695 struct array_cache *ac, int node, 696 struct list_head *list) 697 { 698 struct kmem_cache_node *n = get_node(cachep, node); 699 700 if (ac->avail) { 701 spin_lock(&n->list_lock); 702 /* 703 * Stuff objects into the remote nodes shared array first. 704 * That way we could avoid the overhead of putting the objects 705 * into the free lists and getting them back later. 706 */ 707 if (n->shared) 708 transfer_objects(n->shared, ac, ac->limit); 709 710 free_block(cachep, ac->entry, ac->avail, node, list); 711 ac->avail = 0; 712 spin_unlock(&n->list_lock); 713 } 714 } 715 716 /* 717 * Called from cache_reap() to regularly drain alien caches round robin. 718 */ 719 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) 720 { 721 int node = __this_cpu_read(slab_reap_node); 722 723 if (n->alien) { 724 struct alien_cache *alc = n->alien[node]; 725 struct array_cache *ac; 726 727 if (alc) { 728 ac = &alc->ac; 729 if (ac->avail && spin_trylock_irq(&alc->lock)) { 730 LIST_HEAD(list); 731 732 __drain_alien_cache(cachep, ac, node, &list); 733 spin_unlock_irq(&alc->lock); 734 slabs_destroy(cachep, &list); 735 } 736 } 737 } 738 } 739 740 static void drain_alien_cache(struct kmem_cache *cachep, 741 struct alien_cache **alien) 742 { 743 int i = 0; 744 struct alien_cache *alc; 745 struct array_cache *ac; 746 unsigned long flags; 747 748 for_each_online_node(i) { 749 alc = alien[i]; 750 if (alc) { 751 LIST_HEAD(list); 752 753 ac = &alc->ac; 754 spin_lock_irqsave(&alc->lock, flags); 755 __drain_alien_cache(cachep, ac, i, &list); 756 spin_unlock_irqrestore(&alc->lock, flags); 757 slabs_destroy(cachep, &list); 758 } 759 } 760 } 761 762 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, 763 int node, int page_node) 764 { 765 struct kmem_cache_node *n; 766 struct alien_cache *alien = NULL; 767 struct array_cache *ac; 768 LIST_HEAD(list); 769 770 n = get_node(cachep, node); 771 STATS_INC_NODEFREES(cachep); 772 if (n->alien && n->alien[page_node]) { 773 alien = n->alien[page_node]; 774 ac = &alien->ac; 775 spin_lock(&alien->lock); 776 if (unlikely(ac->avail == ac->limit)) { 777 STATS_INC_ACOVERFLOW(cachep); 778 __drain_alien_cache(cachep, ac, page_node, &list); 779 } 780 __free_one(ac, objp); 781 spin_unlock(&alien->lock); 782 slabs_destroy(cachep, &list); 783 } else { 784 n = get_node(cachep, page_node); 785 spin_lock(&n->list_lock); 786 free_block(cachep, &objp, 1, page_node, &list); 787 spin_unlock(&n->list_lock); 788 slabs_destroy(cachep, &list); 789 } 790 return 1; 791 } 792 793 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 794 { 795 int page_node = page_to_nid(virt_to_page(objp)); 796 int node = numa_mem_id(); 797 /* 798 * Make sure we are not freeing a object from another node to the array 799 * cache on this cpu. 800 */ 801 if (likely(node == page_node)) 802 return 0; 803 804 return __cache_free_alien(cachep, objp, node, page_node); 805 } 806 807 /* 808 * Construct gfp mask to allocate from a specific node but do not reclaim or 809 * warn about failures. 
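 *
 * For example (a sketch): applied to GFP_KERNEL this keeps __GFP_IO and
 * __GFP_FS, adds __GFP_THISNODE and __GFP_NOWARN, and clears
 * __GFP_RECLAIM (direct and kswapd reclaim) and __GFP_NOFAIL, so the
 * allocation fails quickly instead of reclaiming when the target node is
 * short on pages.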
810 */ 811 static inline gfp_t gfp_exact_node(gfp_t flags) 812 { 813 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL); 814 } 815 #endif 816 817 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) 818 { 819 struct kmem_cache_node *n; 820 821 /* 822 * Set up the kmem_cache_node for cpu before we can 823 * begin anything. Make sure some other cpu on this 824 * node has not already allocated this 825 */ 826 n = get_node(cachep, node); 827 if (n) { 828 spin_lock_irq(&n->list_lock); 829 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + 830 cachep->num; 831 spin_unlock_irq(&n->list_lock); 832 833 return 0; 834 } 835 836 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); 837 if (!n) 838 return -ENOMEM; 839 840 kmem_cache_node_init(n); 841 n->next_reap = jiffies + REAPTIMEOUT_NODE + 842 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 843 844 n->free_limit = 845 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; 846 847 /* 848 * The kmem_cache_nodes don't come and go as CPUs 849 * come and go. slab_mutex is sufficient 850 * protection here. 851 */ 852 cachep->node[node] = n; 853 854 return 0; 855 } 856 857 #if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP) 858 /* 859 * Allocates and initializes node for a node on each slab cache, used for 860 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node 861 * will be allocated off-node since memory is not yet online for the new node. 862 * When hotplugging memory or a cpu, existing node are not replaced if 863 * already in use. 864 * 865 * Must hold slab_mutex. 866 */ 867 static int init_cache_node_node(int node) 868 { 869 int ret; 870 struct kmem_cache *cachep; 871 872 list_for_each_entry(cachep, &slab_caches, list) { 873 ret = init_cache_node(cachep, node, GFP_KERNEL); 874 if (ret) 875 return ret; 876 } 877 878 return 0; 879 } 880 #endif 881 882 static int setup_kmem_cache_node(struct kmem_cache *cachep, 883 int node, gfp_t gfp, bool force_change) 884 { 885 int ret = -ENOMEM; 886 struct kmem_cache_node *n; 887 struct array_cache *old_shared = NULL; 888 struct array_cache *new_shared = NULL; 889 struct alien_cache **new_alien = NULL; 890 LIST_HEAD(list); 891 892 if (use_alien_caches) { 893 new_alien = alloc_alien_cache(node, cachep->limit, gfp); 894 if (!new_alien) 895 goto fail; 896 } 897 898 if (cachep->shared) { 899 new_shared = alloc_arraycache(node, 900 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); 901 if (!new_shared) 902 goto fail; 903 } 904 905 ret = init_cache_node(cachep, node, gfp); 906 if (ret) 907 goto fail; 908 909 n = get_node(cachep, node); 910 spin_lock_irq(&n->list_lock); 911 if (n->shared && force_change) { 912 free_block(cachep, n->shared->entry, 913 n->shared->avail, node, &list); 914 n->shared->avail = 0; 915 } 916 917 if (!n->shared || force_change) { 918 old_shared = n->shared; 919 n->shared = new_shared; 920 new_shared = NULL; 921 } 922 923 if (!n->alien) { 924 n->alien = new_alien; 925 new_alien = NULL; 926 } 927 928 spin_unlock_irq(&n->list_lock); 929 slabs_destroy(cachep, &list); 930 931 /* 932 * To protect lockless access to n->shared during irq disabled context. 933 * If n->shared isn't NULL in irq disabled context, accessing to it is 934 * guaranteed to be valid until irq is re-enabled, because it will be 935 * freed after synchronize_rcu(). 
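 *
 * Put differently (a sketch of the reasoning): a reader that sampled the
 * old n->shared with interrupts disabled sits in an implicit RCU
 * read-side critical section, so it must have re-enabled interrupts, and
 * therefore dropped the pointer, before synchronize_rcu() returns and the
 * old array is kfree()d below.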
936 */ 937 if (old_shared && force_change) 938 synchronize_rcu(); 939 940 fail: 941 kfree(old_shared); 942 kfree(new_shared); 943 free_alien_cache(new_alien); 944 945 return ret; 946 } 947 948 #ifdef CONFIG_SMP 949 950 static void cpuup_canceled(long cpu) 951 { 952 struct kmem_cache *cachep; 953 struct kmem_cache_node *n = NULL; 954 int node = cpu_to_mem(cpu); 955 const struct cpumask *mask = cpumask_of_node(node); 956 957 list_for_each_entry(cachep, &slab_caches, list) { 958 struct array_cache *nc; 959 struct array_cache *shared; 960 struct alien_cache **alien; 961 LIST_HEAD(list); 962 963 n = get_node(cachep, node); 964 if (!n) 965 continue; 966 967 spin_lock_irq(&n->list_lock); 968 969 /* Free limit for this kmem_cache_node */ 970 n->free_limit -= cachep->batchcount; 971 972 /* cpu is dead; no one can alloc from it. */ 973 nc = per_cpu_ptr(cachep->cpu_cache, cpu); 974 free_block(cachep, nc->entry, nc->avail, node, &list); 975 nc->avail = 0; 976 977 if (!cpumask_empty(mask)) { 978 spin_unlock_irq(&n->list_lock); 979 goto free_slab; 980 } 981 982 shared = n->shared; 983 if (shared) { 984 free_block(cachep, shared->entry, 985 shared->avail, node, &list); 986 n->shared = NULL; 987 } 988 989 alien = n->alien; 990 n->alien = NULL; 991 992 spin_unlock_irq(&n->list_lock); 993 994 kfree(shared); 995 if (alien) { 996 drain_alien_cache(cachep, alien); 997 free_alien_cache(alien); 998 } 999 1000 free_slab: 1001 slabs_destroy(cachep, &list); 1002 } 1003 /* 1004 * In the previous loop, all the objects were freed to 1005 * the respective cache's slabs, now we can go ahead and 1006 * shrink each nodelist to its limit. 1007 */ 1008 list_for_each_entry(cachep, &slab_caches, list) { 1009 n = get_node(cachep, node); 1010 if (!n) 1011 continue; 1012 drain_freelist(cachep, n, INT_MAX); 1013 } 1014 } 1015 1016 static int cpuup_prepare(long cpu) 1017 { 1018 struct kmem_cache *cachep; 1019 int node = cpu_to_mem(cpu); 1020 int err; 1021 1022 /* 1023 * We need to do this right in the beginning since 1024 * alloc_arraycache's are going to use this list. 1025 * kmalloc_node allows us to add the slab to the right 1026 * kmem_cache_node and not this cpu's kmem_cache_node 1027 */ 1028 err = init_cache_node_node(node); 1029 if (err < 0) 1030 goto bad; 1031 1032 /* 1033 * Now we can go ahead with allocating the shared arrays and 1034 * array caches 1035 */ 1036 list_for_each_entry(cachep, &slab_caches, list) { 1037 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); 1038 if (err) 1039 goto bad; 1040 } 1041 1042 return 0; 1043 bad: 1044 cpuup_canceled(cpu); 1045 return -ENOMEM; 1046 } 1047 1048 int slab_prepare_cpu(unsigned int cpu) 1049 { 1050 int err; 1051 1052 mutex_lock(&slab_mutex); 1053 err = cpuup_prepare(cpu); 1054 mutex_unlock(&slab_mutex); 1055 return err; 1056 } 1057 1058 /* 1059 * This is called for a failed online attempt and for a successful 1060 * offline. 1061 * 1062 * Even if all the cpus of a node are down, we don't free the 1063 * kmem_cache_node of any cache. This to avoid a race between cpu_down, and 1064 * a kmalloc allocation from another cpu for memory from the node of 1065 * the cpu going down. The kmem_cache_node structure is usually allocated from 1066 * kmem_cache_create() and gets destroyed at kmem_cache_destroy(). 
1067 */ 1068 int slab_dead_cpu(unsigned int cpu) 1069 { 1070 mutex_lock(&slab_mutex); 1071 cpuup_canceled(cpu); 1072 mutex_unlock(&slab_mutex); 1073 return 0; 1074 } 1075 #endif 1076 1077 static int slab_online_cpu(unsigned int cpu) 1078 { 1079 start_cpu_timer(cpu); 1080 return 0; 1081 } 1082 1083 static int slab_offline_cpu(unsigned int cpu) 1084 { 1085 /* 1086 * Shutdown cache reaper. Note that the slab_mutex is held so 1087 * that if cache_reap() is invoked it cannot do anything 1088 * expensive but will only modify reap_work and reschedule the 1089 * timer. 1090 */ 1091 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); 1092 /* Now the cache_reaper is guaranteed to be not running. */ 1093 per_cpu(slab_reap_work, cpu).work.func = NULL; 1094 return 0; 1095 } 1096 1097 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 1098 /* 1099 * Drains freelist for a node on each slab cache, used for memory hot-remove. 1100 * Returns -EBUSY if all objects cannot be drained so that the node is not 1101 * removed. 1102 * 1103 * Must hold slab_mutex. 1104 */ 1105 static int __meminit drain_cache_node_node(int node) 1106 { 1107 struct kmem_cache *cachep; 1108 int ret = 0; 1109 1110 list_for_each_entry(cachep, &slab_caches, list) { 1111 struct kmem_cache_node *n; 1112 1113 n = get_node(cachep, node); 1114 if (!n) 1115 continue; 1116 1117 drain_freelist(cachep, n, INT_MAX); 1118 1119 if (!list_empty(&n->slabs_full) || 1120 !list_empty(&n->slabs_partial)) { 1121 ret = -EBUSY; 1122 break; 1123 } 1124 } 1125 return ret; 1126 } 1127 1128 static int __meminit slab_memory_callback(struct notifier_block *self, 1129 unsigned long action, void *arg) 1130 { 1131 struct memory_notify *mnb = arg; 1132 int ret = 0; 1133 int nid; 1134 1135 nid = mnb->status_change_nid; 1136 if (nid < 0) 1137 goto out; 1138 1139 switch (action) { 1140 case MEM_GOING_ONLINE: 1141 mutex_lock(&slab_mutex); 1142 ret = init_cache_node_node(nid); 1143 mutex_unlock(&slab_mutex); 1144 break; 1145 case MEM_GOING_OFFLINE: 1146 mutex_lock(&slab_mutex); 1147 ret = drain_cache_node_node(nid); 1148 mutex_unlock(&slab_mutex); 1149 break; 1150 case MEM_ONLINE: 1151 case MEM_OFFLINE: 1152 case MEM_CANCEL_ONLINE: 1153 case MEM_CANCEL_OFFLINE: 1154 break; 1155 } 1156 out: 1157 return notifier_from_errno(ret); 1158 } 1159 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ 1160 1161 /* 1162 * swap the static kmem_cache_node with kmalloced memory 1163 */ 1164 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, 1165 int nodeid) 1166 { 1167 struct kmem_cache_node *ptr; 1168 1169 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); 1170 BUG_ON(!ptr); 1171 1172 memcpy(ptr, list, sizeof(struct kmem_cache_node)); 1173 /* 1174 * Do not assume that spinlocks can be initialized via memcpy: 1175 */ 1176 spin_lock_init(&ptr->list_lock); 1177 1178 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1179 cachep->node[nodeid] = ptr; 1180 } 1181 1182 /* 1183 * For setting up all the kmem_cache_node for cache whose buffer_size is same as 1184 * size of kmem_cache_node. 1185 */ 1186 static void __init set_up_node(struct kmem_cache *cachep, int index) 1187 { 1188 int node; 1189 1190 for_each_online_node(node) { 1191 cachep->node[node] = &init_kmem_cache_node[index + node]; 1192 cachep->node[node]->next_reap = jiffies + 1193 REAPTIMEOUT_NODE + 1194 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1195 } 1196 } 1197 1198 /* 1199 * Initialisation. Called after the page allocator have been initialised and 1200 * before smp_init(). 
1201 */ 1202 void __init kmem_cache_init(void) 1203 { 1204 int i; 1205 1206 kmem_cache = &kmem_cache_boot; 1207 1208 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1) 1209 use_alien_caches = 0; 1210 1211 for (i = 0; i < NUM_INIT_LISTS; i++) 1212 kmem_cache_node_init(&init_kmem_cache_node[i]); 1213 1214 /* 1215 * Fragmentation resistance on low memory - only use bigger 1216 * page orders on machines with more than 32MB of memory if 1217 * not overridden on the command line. 1218 */ 1219 if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT) 1220 slab_max_order = SLAB_MAX_ORDER_HI; 1221 1222 /* Bootstrap is tricky, because several objects are allocated 1223 * from caches that do not exist yet: 1224 * 1) initialize the kmem_cache cache: it contains the struct 1225 * kmem_cache structures of all caches, except kmem_cache itself: 1226 * kmem_cache is statically allocated. 1227 * Initially an __init data area is used for the head array and the 1228 * kmem_cache_node structures, it's replaced with a kmalloc allocated 1229 * array at the end of the bootstrap. 1230 * 2) Create the first kmalloc cache. 1231 * The struct kmem_cache for the new cache is allocated normally. 1232 * An __init data area is used for the head array. 1233 * 3) Create the remaining kmalloc caches, with minimally sized 1234 * head arrays. 1235 * 4) Replace the __init data head arrays for kmem_cache and the first 1236 * kmalloc cache with kmalloc allocated arrays. 1237 * 5) Replace the __init data for kmem_cache_node for kmem_cache and 1238 * the other cache's with kmalloc allocated memory. 1239 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1240 */ 1241 1242 /* 1) create the kmem_cache */ 1243 1244 /* 1245 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids 1246 */ 1247 create_boot_cache(kmem_cache, "kmem_cache", 1248 offsetof(struct kmem_cache, node) + 1249 nr_node_ids * sizeof(struct kmem_cache_node *), 1250 SLAB_HWCACHE_ALIGN, 0, 0); 1251 list_add(&kmem_cache->list, &slab_caches); 1252 slab_state = PARTIAL; 1253 1254 /* 1255 * Initialize the caches that provide memory for the kmem_cache_node 1256 * structures first. Without this, further allocations will bug. 1257 */ 1258 kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache( 1259 kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL], 1260 kmalloc_info[INDEX_NODE].size, 1261 ARCH_KMALLOC_FLAGS, 0, 1262 kmalloc_info[INDEX_NODE].size); 1263 slab_state = PARTIAL_NODE; 1264 setup_kmalloc_cache_index_table(); 1265 1266 slab_early_init = 0; 1267 1268 /* 5) Replace the bootstrap kmem_cache_node */ 1269 { 1270 int nid; 1271 1272 for_each_online_node(nid) { 1273 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); 1274 1275 init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE], 1276 &init_kmem_cache_node[SIZE_NODE + nid], nid); 1277 } 1278 } 1279 1280 create_kmalloc_caches(ARCH_KMALLOC_FLAGS); 1281 } 1282 1283 void __init kmem_cache_init_late(void) 1284 { 1285 struct kmem_cache *cachep; 1286 1287 /* 6) resize the head arrays to their final sizes */ 1288 mutex_lock(&slab_mutex); 1289 list_for_each_entry(cachep, &slab_caches, list) 1290 if (enable_cpucache(cachep, GFP_NOWAIT)) 1291 BUG(); 1292 mutex_unlock(&slab_mutex); 1293 1294 /* Done! */ 1295 slab_state = FULL; 1296 1297 #ifdef CONFIG_NUMA 1298 /* 1299 * Register a memory hotplug callback that initializes and frees 1300 * node. 
1301 */ 1302 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 1303 #endif 1304 1305 /* 1306 * The reap timers are started later, with a module init call: That part 1307 * of the kernel is not yet operational. 1308 */ 1309 } 1310 1311 static int __init cpucache_init(void) 1312 { 1313 int ret; 1314 1315 /* 1316 * Register the timers that return unneeded pages to the page allocator 1317 */ 1318 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online", 1319 slab_online_cpu, slab_offline_cpu); 1320 WARN_ON(ret < 0); 1321 1322 return 0; 1323 } 1324 __initcall(cpucache_init); 1325 1326 static noinline void 1327 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) 1328 { 1329 #if DEBUG 1330 struct kmem_cache_node *n; 1331 unsigned long flags; 1332 int node; 1333 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 1334 DEFAULT_RATELIMIT_BURST); 1335 1336 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) 1337 return; 1338 1339 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 1340 nodeid, gfpflags, &gfpflags); 1341 pr_warn(" cache: %s, object size: %d, order: %d\n", 1342 cachep->name, cachep->size, cachep->gfporder); 1343 1344 for_each_kmem_cache_node(cachep, node, n) { 1345 unsigned long total_slabs, free_slabs, free_objs; 1346 1347 spin_lock_irqsave(&n->list_lock, flags); 1348 total_slabs = n->total_slabs; 1349 free_slabs = n->free_slabs; 1350 free_objs = n->free_objects; 1351 spin_unlock_irqrestore(&n->list_lock, flags); 1352 1353 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", 1354 node, total_slabs - free_slabs, total_slabs, 1355 (total_slabs * cachep->num) - free_objs, 1356 total_slabs * cachep->num); 1357 } 1358 #endif 1359 } 1360 1361 /* 1362 * Interface to system's page allocator. No need to hold the 1363 * kmem_cache_node ->list_lock. 1364 * 1365 * If we requested dmaable memory, we will get it. Even if we 1366 * did not request dmaable memory, we might get it, but that 1367 * would be relatively rare and ignorable. 1368 */ 1369 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, 1370 int nodeid) 1371 { 1372 struct page *page; 1373 1374 flags |= cachep->allocflags; 1375 1376 page = __alloc_pages_node(nodeid, flags, cachep->gfporder); 1377 if (!page) { 1378 slab_out_of_memory(cachep, flags, nodeid); 1379 return NULL; 1380 } 1381 1382 account_slab_page(page, cachep->gfporder, cachep); 1383 __SetPageSlab(page); 1384 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1385 if (sk_memalloc_socks() && page_is_pfmemalloc(page)) 1386 SetPageSlabPfmemalloc(page); 1387 1388 return page; 1389 } 1390 1391 /* 1392 * Interface to system's page release. 
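 * (The inverse of kmem_getpages() above: it clears the slab page state,
 *  credits current->reclaim_state and returns the pages to the page
 *  allocator.)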
1393 */ 1394 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) 1395 { 1396 int order = cachep->gfporder; 1397 1398 BUG_ON(!PageSlab(page)); 1399 __ClearPageSlabPfmemalloc(page); 1400 __ClearPageSlab(page); 1401 page_mapcount_reset(page); 1402 /* In union with page->mapping where page allocator expects NULL */ 1403 page->slab_cache = NULL; 1404 1405 if (current->reclaim_state) 1406 current->reclaim_state->reclaimed_slab += 1 << order; 1407 unaccount_slab_page(page, order, cachep); 1408 __free_pages(page, order); 1409 } 1410 1411 static void kmem_rcu_free(struct rcu_head *head) 1412 { 1413 struct kmem_cache *cachep; 1414 struct page *page; 1415 1416 page = container_of(head, struct page, rcu_head); 1417 cachep = page->slab_cache; 1418 1419 kmem_freepages(cachep, page); 1420 } 1421 1422 #if DEBUG 1423 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) 1424 { 1425 if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) && 1426 (cachep->size % PAGE_SIZE) == 0) 1427 return true; 1428 1429 return false; 1430 } 1431 1432 #ifdef CONFIG_DEBUG_PAGEALLOC 1433 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) 1434 { 1435 if (!is_debug_pagealloc_cache(cachep)) 1436 return; 1437 1438 __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); 1439 } 1440 1441 #else 1442 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, 1443 int map) {} 1444 1445 #endif 1446 1447 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1448 { 1449 int size = cachep->object_size; 1450 addr = &((char *)addr)[obj_offset(cachep)]; 1451 1452 memset(addr, val, size); 1453 *(unsigned char *)(addr + size - 1) = POISON_END; 1454 } 1455 1456 static void dump_line(char *data, int offset, int limit) 1457 { 1458 int i; 1459 unsigned char error = 0; 1460 int bad_count = 0; 1461 1462 pr_err("%03x: ", offset); 1463 for (i = 0; i < limit; i++) { 1464 if (data[offset + i] != POISON_FREE) { 1465 error = data[offset + i]; 1466 bad_count++; 1467 } 1468 } 1469 print_hex_dump(KERN_CONT, "", 0, 16, 1, 1470 &data[offset], limit, 1); 1471 1472 if (bad_count == 1) { 1473 error ^= POISON_FREE; 1474 if (!(error & (error - 1))) { 1475 pr_err("Single bit error detected. 
Probably bad RAM.\n"); 1476 #ifdef CONFIG_X86 1477 pr_err("Run memtest86+ or a similar memory test tool.\n"); 1478 #else 1479 pr_err("Run a memory test tool.\n"); 1480 #endif 1481 } 1482 } 1483 } 1484 #endif 1485 1486 #if DEBUG 1487 1488 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1489 { 1490 int i, size; 1491 char *realobj; 1492 1493 if (cachep->flags & SLAB_RED_ZONE) { 1494 pr_err("Redzone: 0x%llx/0x%llx\n", 1495 *dbg_redzone1(cachep, objp), 1496 *dbg_redzone2(cachep, objp)); 1497 } 1498 1499 if (cachep->flags & SLAB_STORE_USER) 1500 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); 1501 realobj = (char *)objp + obj_offset(cachep); 1502 size = cachep->object_size; 1503 for (i = 0; i < size && lines; i += 16, lines--) { 1504 int limit; 1505 limit = 16; 1506 if (i + limit > size) 1507 limit = size - i; 1508 dump_line(realobj, i, limit); 1509 } 1510 } 1511 1512 static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1513 { 1514 char *realobj; 1515 int size, i; 1516 int lines = 0; 1517 1518 if (is_debug_pagealloc_cache(cachep)) 1519 return; 1520 1521 realobj = (char *)objp + obj_offset(cachep); 1522 size = cachep->object_size; 1523 1524 for (i = 0; i < size; i++) { 1525 char exp = POISON_FREE; 1526 if (i == size - 1) 1527 exp = POISON_END; 1528 if (realobj[i] != exp) { 1529 int limit; 1530 /* Mismatch ! */ 1531 /* Print header */ 1532 if (lines == 0) { 1533 pr_err("Slab corruption (%s): %s start=%px, len=%d\n", 1534 print_tainted(), cachep->name, 1535 realobj, size); 1536 print_objinfo(cachep, objp, 0); 1537 } 1538 /* Hexdump the affected line */ 1539 i = (i / 16) * 16; 1540 limit = 16; 1541 if (i + limit > size) 1542 limit = size - i; 1543 dump_line(realobj, i, limit); 1544 i += 16; 1545 lines++; 1546 /* Limit to 5 lines */ 1547 if (lines > 5) 1548 break; 1549 } 1550 } 1551 if (lines != 0) { 1552 /* Print some data about the neighboring objects, if they 1553 * exist: 1554 */ 1555 struct page *page = virt_to_head_page(objp); 1556 unsigned int objnr; 1557 1558 objnr = obj_to_index(cachep, page, objp); 1559 if (objnr) { 1560 objp = index_to_obj(cachep, page, objnr - 1); 1561 realobj = (char *)objp + obj_offset(cachep); 1562 pr_err("Prev obj: start=%px, len=%d\n", realobj, size); 1563 print_objinfo(cachep, objp, 2); 1564 } 1565 if (objnr + 1 < cachep->num) { 1566 objp = index_to_obj(cachep, page, objnr + 1); 1567 realobj = (char *)objp + obj_offset(cachep); 1568 pr_err("Next obj: start=%px, len=%d\n", realobj, size); 1569 print_objinfo(cachep, objp, 2); 1570 } 1571 } 1572 } 1573 #endif 1574 1575 #if DEBUG 1576 static void slab_destroy_debugcheck(struct kmem_cache *cachep, 1577 struct page *page) 1578 { 1579 int i; 1580 1581 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { 1582 poison_obj(cachep, page->freelist - obj_offset(cachep), 1583 POISON_FREE); 1584 } 1585 1586 for (i = 0; i < cachep->num; i++) { 1587 void *objp = index_to_obj(cachep, page, i); 1588 1589 if (cachep->flags & SLAB_POISON) { 1590 check_poison_obj(cachep, objp); 1591 slab_kernel_map(cachep, objp, 1); 1592 } 1593 if (cachep->flags & SLAB_RED_ZONE) { 1594 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1595 slab_error(cachep, "start of a freed object was overwritten"); 1596 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1597 slab_error(cachep, "end of a freed object was overwritten"); 1598 } 1599 } 1600 } 1601 #else 1602 static void slab_destroy_debugcheck(struct kmem_cache *cachep, 1603 struct page *page) 1604 { 1605 } 1606 #endif 1607 1608 /** 1609 * 
slab_destroy - destroy and release all objects in a slab 1610 * @cachep: cache pointer being destroyed 1611 * @page: page pointer being destroyed 1612 * 1613 * Destroy all the objs in a slab page, and release the mem back to the system. 1614 * Before calling the slab page must have been unlinked from the cache. The 1615 * kmem_cache_node ->list_lock is not held/needed. 1616 */ 1617 static void slab_destroy(struct kmem_cache *cachep, struct page *page) 1618 { 1619 void *freelist; 1620 1621 freelist = page->freelist; 1622 slab_destroy_debugcheck(cachep, page); 1623 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) 1624 call_rcu(&page->rcu_head, kmem_rcu_free); 1625 else 1626 kmem_freepages(cachep, page); 1627 1628 /* 1629 * From now on, we don't use freelist 1630 * although actual page can be freed in rcu context 1631 */ 1632 if (OFF_SLAB(cachep)) 1633 kmem_cache_free(cachep->freelist_cache, freelist); 1634 } 1635 1636 /* 1637 * Update the size of the caches before calling slabs_destroy as it may 1638 * recursively call kfree. 1639 */ 1640 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) 1641 { 1642 struct page *page, *n; 1643 1644 list_for_each_entry_safe(page, n, list, slab_list) { 1645 list_del(&page->slab_list); 1646 slab_destroy(cachep, page); 1647 } 1648 } 1649 1650 /** 1651 * calculate_slab_order - calculate size (page order) of slabs 1652 * @cachep: pointer to the cache that is being created 1653 * @size: size of objects to be created in this cache. 1654 * @flags: slab allocation flags 1655 * 1656 * Also calculates the number of objects per slab. 1657 * 1658 * This could be made much more intelligent. For now, try to avoid using 1659 * high order pages for slabs. When the gfp() functions are more friendly 1660 * towards high-order requests, this should be changed. 1661 * 1662 * Return: number of left-over bytes in a slab 1663 */ 1664 static size_t calculate_slab_order(struct kmem_cache *cachep, 1665 size_t size, slab_flags_t flags) 1666 { 1667 size_t left_over = 0; 1668 int gfporder; 1669 1670 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1671 unsigned int num; 1672 size_t remainder; 1673 1674 num = cache_estimate(gfporder, size, flags, &remainder); 1675 if (!num) 1676 continue; 1677 1678 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ 1679 if (num > SLAB_OBJ_MAX_NUM) 1680 break; 1681 1682 if (flags & CFLGS_OFF_SLAB) { 1683 struct kmem_cache *freelist_cache; 1684 size_t freelist_size; 1685 1686 freelist_size = num * sizeof(freelist_idx_t); 1687 freelist_cache = kmalloc_slab(freelist_size, 0u); 1688 if (!freelist_cache) 1689 continue; 1690 1691 /* 1692 * Needed to avoid possible looping condition 1693 * in cache_grow_begin() 1694 */ 1695 if (OFF_SLAB(freelist_cache)) 1696 continue; 1697 1698 /* check if off slab has enough benefit */ 1699 if (freelist_cache->size > cachep->size / 2) 1700 continue; 1701 } 1702 1703 /* Found something acceptable - save it away */ 1704 cachep->num = num; 1705 cachep->gfporder = gfporder; 1706 left_over = remainder; 1707 1708 /* 1709 * A VFS-reclaimable slab tends to have most allocations 1710 * as GFP_NOFS and we really don't want to have to be allocating 1711 * higher-order pages when we are unable to shrink dcache. 1712 */ 1713 if (flags & SLAB_RECLAIM_ACCOUNT) 1714 break; 1715 1716 /* 1717 * Large number of objects is good, but very large slabs are 1718 * currently bad for the gfp()s. 
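 *
 * As a rough worked example (illustrative numbers): if an order-0 slab
 * would leave 600 bytes unused, 600 * 8 = 4800 > 4096 and the loop moves
 * on to the next order; once the leftover is at most 1/8 of the slab
 * (left_over * 8 <= PAGE_SIZE << gfporder, tested below), the current
 * order is accepted.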
1719 */ 1720 if (gfporder >= slab_max_order) 1721 break; 1722 1723 /* 1724 * Acceptable internal fragmentation? 1725 */ 1726 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 1727 break; 1728 } 1729 return left_over; 1730 } 1731 1732 static struct array_cache __percpu *alloc_kmem_cache_cpus( 1733 struct kmem_cache *cachep, int entries, int batchcount) 1734 { 1735 int cpu; 1736 size_t size; 1737 struct array_cache __percpu *cpu_cache; 1738 1739 size = sizeof(void *) * entries + sizeof(struct array_cache); 1740 cpu_cache = __alloc_percpu(size, sizeof(void *)); 1741 1742 if (!cpu_cache) 1743 return NULL; 1744 1745 for_each_possible_cpu(cpu) { 1746 init_arraycache(per_cpu_ptr(cpu_cache, cpu), 1747 entries, batchcount); 1748 } 1749 1750 return cpu_cache; 1751 } 1752 1753 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) 1754 { 1755 if (slab_state >= FULL) 1756 return enable_cpucache(cachep, gfp); 1757 1758 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); 1759 if (!cachep->cpu_cache) 1760 return 1; 1761 1762 if (slab_state == DOWN) { 1763 /* Creation of first cache (kmem_cache). */ 1764 set_up_node(kmem_cache, CACHE_CACHE); 1765 } else if (slab_state == PARTIAL) { 1766 /* For kmem_cache_node */ 1767 set_up_node(cachep, SIZE_NODE); 1768 } else { 1769 int node; 1770 1771 for_each_online_node(node) { 1772 cachep->node[node] = kmalloc_node( 1773 sizeof(struct kmem_cache_node), gfp, node); 1774 BUG_ON(!cachep->node[node]); 1775 kmem_cache_node_init(cachep->node[node]); 1776 } 1777 } 1778 1779 cachep->node[numa_mem_id()]->next_reap = 1780 jiffies + REAPTIMEOUT_NODE + 1781 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1782 1783 cpu_cache_get(cachep)->avail = 0; 1784 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 1785 cpu_cache_get(cachep)->batchcount = 1; 1786 cpu_cache_get(cachep)->touched = 0; 1787 cachep->batchcount = 1; 1788 cachep->limit = BOOT_CPUCACHE_ENTRIES; 1789 return 0; 1790 } 1791 1792 slab_flags_t kmem_cache_flags(unsigned int object_size, 1793 slab_flags_t flags, const char *name, 1794 void (*ctor)(void *)) 1795 { 1796 return flags; 1797 } 1798 1799 struct kmem_cache * 1800 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 1801 slab_flags_t flags, void (*ctor)(void *)) 1802 { 1803 struct kmem_cache *cachep; 1804 1805 cachep = find_mergeable(size, align, flags, name, ctor); 1806 if (cachep) { 1807 cachep->refcount++; 1808 1809 /* 1810 * Adjust the object sizes so that we clear 1811 * the complete object on kzalloc. 1812 */ 1813 cachep->object_size = max_t(int, cachep->object_size, size); 1814 } 1815 return cachep; 1816 } 1817 1818 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, 1819 size_t size, slab_flags_t flags) 1820 { 1821 size_t left; 1822 1823 cachep->num = 0; 1824 1825 /* 1826 * If slab auto-initialization on free is enabled, store the freelist 1827 * off-slab, so that its contents don't end up in one of the allocated 1828 * objects. 
1829 */ 1830 if (unlikely(slab_want_init_on_free(cachep))) 1831 return false; 1832 1833 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) 1834 return false; 1835 1836 left = calculate_slab_order(cachep, size, 1837 flags | CFLGS_OBJFREELIST_SLAB); 1838 if (!cachep->num) 1839 return false; 1840 1841 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) 1842 return false; 1843 1844 cachep->colour = left / cachep->colour_off; 1845 1846 return true; 1847 } 1848 1849 static bool set_off_slab_cache(struct kmem_cache *cachep, 1850 size_t size, slab_flags_t flags) 1851 { 1852 size_t left; 1853 1854 cachep->num = 0; 1855 1856 /* 1857 * Always use on-slab management when SLAB_NOLEAKTRACE 1858 * to avoid recursive calls into kmemleak. 1859 */ 1860 if (flags & SLAB_NOLEAKTRACE) 1861 return false; 1862 1863 /* 1864 * Size is large, assume best to place the slab management obj 1865 * off-slab (should allow better packing of objs). 1866 */ 1867 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); 1868 if (!cachep->num) 1869 return false; 1870 1871 /* 1872 * If the slab has been placed off-slab, and we have enough space then 1873 * move it on-slab. This is at the expense of any extra colouring. 1874 */ 1875 if (left >= cachep->num * sizeof(freelist_idx_t)) 1876 return false; 1877 1878 cachep->colour = left / cachep->colour_off; 1879 1880 return true; 1881 } 1882 1883 static bool set_on_slab_cache(struct kmem_cache *cachep, 1884 size_t size, slab_flags_t flags) 1885 { 1886 size_t left; 1887 1888 cachep->num = 0; 1889 1890 left = calculate_slab_order(cachep, size, flags); 1891 if (!cachep->num) 1892 return false; 1893 1894 cachep->colour = left / cachep->colour_off; 1895 1896 return true; 1897 } 1898 1899 /** 1900 * __kmem_cache_create - Create a cache. 1901 * @cachep: cache management descriptor 1902 * @flags: SLAB flags 1903 * 1904 * Returns a ptr to the cache on success, NULL on failure. 1905 * Cannot be called within a int, but can be interrupted. 1906 * The @ctor is run when new pages are allocated by the cache. 1907 * 1908 * The flags are 1909 * 1910 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 1911 * to catch references to uninitialised memory. 1912 * 1913 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 1914 * for buffer overruns. 1915 * 1916 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 1917 * cacheline. This can be beneficial if you're counting cycles as closely 1918 * as davem. 1919 * 1920 * Return: a pointer to the created cache or %NULL in case of error 1921 */ 1922 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) 1923 { 1924 size_t ralign = BYTES_PER_WORD; 1925 gfp_t gfp; 1926 int err; 1927 unsigned int size = cachep->size; 1928 1929 #if DEBUG 1930 #if FORCED_DEBUG 1931 /* 1932 * Enable redzoning and last user accounting, except for caches with 1933 * large objects, if the increased size would increase the object size 1934 * above the next power of two: caches with object sizes just above a 1935 * power of two have a significant amount of internal fragmentation. 1936 */ 1937 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 1938 2 * sizeof(unsigned long long))) 1939 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 1940 if (!(flags & SLAB_TYPESAFE_BY_RCU)) 1941 flags |= SLAB_POISON; 1942 #endif 1943 #endif 1944 1945 /* 1946 * Check that size is in terms of words. 
This is needed to avoid 1947 * unaligned accesses for some archs when redzoning is used, and makes 1948 * sure any on-slab bufctl's are also correctly aligned. 1949 */ 1950 size = ALIGN(size, BYTES_PER_WORD); 1951 1952 if (flags & SLAB_RED_ZONE) { 1953 ralign = REDZONE_ALIGN; 1954 /* If redzoning, ensure that the second redzone is suitably 1955 * aligned, by adjusting the object size accordingly. */ 1956 size = ALIGN(size, REDZONE_ALIGN); 1957 } 1958 1959 /* 3) caller mandated alignment */ 1960 if (ralign < cachep->align) { 1961 ralign = cachep->align; 1962 } 1963 /* disable debug if necessary */ 1964 if (ralign > __alignof__(unsigned long long)) 1965 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 1966 /* 1967 * 4) Store it. 1968 */ 1969 cachep->align = ralign; 1970 cachep->colour_off = cache_line_size(); 1971 /* Offset must be a multiple of the alignment. */ 1972 if (cachep->colour_off < cachep->align) 1973 cachep->colour_off = cachep->align; 1974 1975 if (slab_is_available()) 1976 gfp = GFP_KERNEL; 1977 else 1978 gfp = GFP_NOWAIT; 1979 1980 #if DEBUG 1981 1982 /* 1983 * Both debugging options require word-alignment which is calculated 1984 * into align above. 1985 */ 1986 if (flags & SLAB_RED_ZONE) { 1987 /* add space for red zone words */ 1988 cachep->obj_offset += sizeof(unsigned long long); 1989 size += 2 * sizeof(unsigned long long); 1990 } 1991 if (flags & SLAB_STORE_USER) { 1992 /* user store requires one word storage behind the end of 1993 * the real object. But if the second red zone needs to be 1994 * aligned to 64 bits, we must allow that much space. 1995 */ 1996 if (flags & SLAB_RED_ZONE) 1997 size += REDZONE_ALIGN; 1998 else 1999 size += BYTES_PER_WORD; 2000 } 2001 #endif 2002 2003 kasan_cache_create(cachep, &size, &flags); 2004 2005 size = ALIGN(size, cachep->align); 2006 /* 2007 * We should restrict the number of objects in a slab to implement 2008 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. 2009 */ 2010 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) 2011 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); 2012 2013 #if DEBUG 2014 /* 2015 * To activate debug pagealloc, off-slab management is necessary 2016 * requirement. In early phase of initialization, small sized slab 2017 * doesn't get initialized so it would not be possible. So, we need 2018 * to check size >= 256. It guarantees that all necessary small 2019 * sized slab is initialized in current slab initialization sequence. 
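	 *
	 * Worked example (editorial, assuming PAGE_SIZE == 4096): a poisoned
	 * cache whose aligned object size is 1024 bytes takes the branch
	 * below with tmp_size = ALIGN(1024, PAGE_SIZE) = 4096.  If the
	 * off-slab freelist can be set up for that size, obj_offset grows by
	 * 3072 and size becomes 4096, so each object ends up in its own
	 * page-sized slot and kernel_map_pages() can protect whole pages
	 * around poisoned objects.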
2020 */ 2021 if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) && 2022 size >= 256 && cachep->object_size > cache_line_size()) { 2023 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { 2024 size_t tmp_size = ALIGN(size, PAGE_SIZE); 2025 2026 if (set_off_slab_cache(cachep, tmp_size, flags)) { 2027 flags |= CFLGS_OFF_SLAB; 2028 cachep->obj_offset += tmp_size - size; 2029 size = tmp_size; 2030 goto done; 2031 } 2032 } 2033 } 2034 #endif 2035 2036 if (set_objfreelist_slab_cache(cachep, size, flags)) { 2037 flags |= CFLGS_OBJFREELIST_SLAB; 2038 goto done; 2039 } 2040 2041 if (set_off_slab_cache(cachep, size, flags)) { 2042 flags |= CFLGS_OFF_SLAB; 2043 goto done; 2044 } 2045 2046 if (set_on_slab_cache(cachep, size, flags)) 2047 goto done; 2048 2049 return -E2BIG; 2050 2051 done: 2052 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); 2053 cachep->flags = flags; 2054 cachep->allocflags = __GFP_COMP; 2055 if (flags & SLAB_CACHE_DMA) 2056 cachep->allocflags |= GFP_DMA; 2057 if (flags & SLAB_CACHE_DMA32) 2058 cachep->allocflags |= GFP_DMA32; 2059 if (flags & SLAB_RECLAIM_ACCOUNT) 2060 cachep->allocflags |= __GFP_RECLAIMABLE; 2061 cachep->size = size; 2062 cachep->reciprocal_buffer_size = reciprocal_value(size); 2063 2064 #if DEBUG 2065 /* 2066 * If we're going to use the generic kernel_map_pages() 2067 * poisoning, then it's going to smash the contents of 2068 * the redzone and userword anyhow, so switch them off. 2069 */ 2070 if (IS_ENABLED(CONFIG_PAGE_POISONING) && 2071 (cachep->flags & SLAB_POISON) && 2072 is_debug_pagealloc_cache(cachep)) 2073 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2074 #endif 2075 2076 if (OFF_SLAB(cachep)) { 2077 cachep->freelist_cache = 2078 kmalloc_slab(cachep->freelist_size, 0u); 2079 } 2080 2081 err = setup_cpu_cache(cachep, gfp); 2082 if (err) { 2083 __kmem_cache_release(cachep); 2084 return err; 2085 } 2086 2087 return 0; 2088 } 2089 2090 #if DEBUG 2091 static void check_irq_off(void) 2092 { 2093 BUG_ON(!irqs_disabled()); 2094 } 2095 2096 static void check_irq_on(void) 2097 { 2098 BUG_ON(irqs_disabled()); 2099 } 2100 2101 static void check_mutex_acquired(void) 2102 { 2103 BUG_ON(!mutex_is_locked(&slab_mutex)); 2104 } 2105 2106 static void check_spinlock_acquired(struct kmem_cache *cachep) 2107 { 2108 #ifdef CONFIG_SMP 2109 check_irq_off(); 2110 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); 2111 #endif 2112 } 2113 2114 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2115 { 2116 #ifdef CONFIG_SMP 2117 check_irq_off(); 2118 assert_spin_locked(&get_node(cachep, node)->list_lock); 2119 #endif 2120 } 2121 2122 #else 2123 #define check_irq_off() do { } while(0) 2124 #define check_irq_on() do { } while(0) 2125 #define check_mutex_acquired() do { } while(0) 2126 #define check_spinlock_acquired(x) do { } while(0) 2127 #define check_spinlock_acquired_node(x, y) do { } while(0) 2128 #endif 2129 2130 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, 2131 int node, bool free_all, struct list_head *list) 2132 { 2133 int tofree; 2134 2135 if (!ac || !ac->avail) 2136 return; 2137 2138 tofree = free_all ? 
ac->avail : (ac->limit + 4) / 5; 2139 if (tofree > ac->avail) 2140 tofree = (ac->avail + 1) / 2; 2141 2142 free_block(cachep, ac->entry, tofree, node, list); 2143 ac->avail -= tofree; 2144 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); 2145 } 2146 2147 static void do_drain(void *arg) 2148 { 2149 struct kmem_cache *cachep = arg; 2150 struct array_cache *ac; 2151 int node = numa_mem_id(); 2152 struct kmem_cache_node *n; 2153 LIST_HEAD(list); 2154 2155 check_irq_off(); 2156 ac = cpu_cache_get(cachep); 2157 n = get_node(cachep, node); 2158 spin_lock(&n->list_lock); 2159 free_block(cachep, ac->entry, ac->avail, node, &list); 2160 spin_unlock(&n->list_lock); 2161 ac->avail = 0; 2162 slabs_destroy(cachep, &list); 2163 } 2164 2165 static void drain_cpu_caches(struct kmem_cache *cachep) 2166 { 2167 struct kmem_cache_node *n; 2168 int node; 2169 LIST_HEAD(list); 2170 2171 on_each_cpu(do_drain, cachep, 1); 2172 check_irq_on(); 2173 for_each_kmem_cache_node(cachep, node, n) 2174 if (n->alien) 2175 drain_alien_cache(cachep, n->alien); 2176 2177 for_each_kmem_cache_node(cachep, node, n) { 2178 spin_lock_irq(&n->list_lock); 2179 drain_array_locked(cachep, n->shared, node, true, &list); 2180 spin_unlock_irq(&n->list_lock); 2181 2182 slabs_destroy(cachep, &list); 2183 } 2184 } 2185 2186 /* 2187 * Remove slabs from the list of free slabs. 2188 * Specify the number of slabs to drain in tofree. 2189 * 2190 * Returns the actual number of slabs released. 2191 */ 2192 static int drain_freelist(struct kmem_cache *cache, 2193 struct kmem_cache_node *n, int tofree) 2194 { 2195 struct list_head *p; 2196 int nr_freed; 2197 struct page *page; 2198 2199 nr_freed = 0; 2200 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2201 2202 spin_lock_irq(&n->list_lock); 2203 p = n->slabs_free.prev; 2204 if (p == &n->slabs_free) { 2205 spin_unlock_irq(&n->list_lock); 2206 goto out; 2207 } 2208 2209 page = list_entry(p, struct page, slab_list); 2210 list_del(&page->slab_list); 2211 n->free_slabs--; 2212 n->total_slabs--; 2213 /* 2214 * Safe to drop the lock. The slab is no longer linked 2215 * to the cache. 2216 */ 2217 n->free_objects -= cache->num; 2218 spin_unlock_irq(&n->list_lock); 2219 slab_destroy(cache, page); 2220 nr_freed++; 2221 } 2222 out: 2223 return nr_freed; 2224 } 2225 2226 bool __kmem_cache_empty(struct kmem_cache *s) 2227 { 2228 int node; 2229 struct kmem_cache_node *n; 2230 2231 for_each_kmem_cache_node(s, node, n) 2232 if (!list_empty(&n->slabs_full) || 2233 !list_empty(&n->slabs_partial)) 2234 return false; 2235 return true; 2236 } 2237 2238 int __kmem_cache_shrink(struct kmem_cache *cachep) 2239 { 2240 int ret = 0; 2241 int node; 2242 struct kmem_cache_node *n; 2243 2244 drain_cpu_caches(cachep); 2245 2246 check_irq_on(); 2247 for_each_kmem_cache_node(cachep, node, n) { 2248 drain_freelist(cachep, n, INT_MAX); 2249 2250 ret += !list_empty(&n->slabs_full) || 2251 !list_empty(&n->slabs_partial); 2252 } 2253 return (ret ? 
1 : 0); 2254 } 2255 2256 int __kmem_cache_shutdown(struct kmem_cache *cachep) 2257 { 2258 return __kmem_cache_shrink(cachep); 2259 } 2260 2261 void __kmem_cache_release(struct kmem_cache *cachep) 2262 { 2263 int i; 2264 struct kmem_cache_node *n; 2265 2266 cache_random_seq_destroy(cachep); 2267 2268 free_percpu(cachep->cpu_cache); 2269 2270 /* NUMA: free the node structures */ 2271 for_each_kmem_cache_node(cachep, i, n) { 2272 kfree(n->shared); 2273 free_alien_cache(n->alien); 2274 kfree(n); 2275 cachep->node[i] = NULL; 2276 } 2277 } 2278 2279 /* 2280 * Get the memory for a slab management obj. 2281 * 2282 * For a slab cache when the slab descriptor is off-slab, the 2283 * slab descriptor can't come from the same cache which is being created, 2284 * Because if it is the case, that means we defer the creation of 2285 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point. 2286 * And we eventually call down to __kmem_cache_create(), which 2287 * in turn looks up in the kmalloc_{dma,}_caches for the disired-size one. 2288 * This is a "chicken-and-egg" problem. 2289 * 2290 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches, 2291 * which are all initialized during kmem_cache_init(). 2292 */ 2293 static void *alloc_slabmgmt(struct kmem_cache *cachep, 2294 struct page *page, int colour_off, 2295 gfp_t local_flags, int nodeid) 2296 { 2297 void *freelist; 2298 void *addr = page_address(page); 2299 2300 page->s_mem = addr + colour_off; 2301 page->active = 0; 2302 2303 if (OBJFREELIST_SLAB(cachep)) 2304 freelist = NULL; 2305 else if (OFF_SLAB(cachep)) { 2306 /* Slab management obj is off-slab. */ 2307 freelist = kmem_cache_alloc_node(cachep->freelist_cache, 2308 local_flags, nodeid); 2309 } else { 2310 /* We will use last bytes at the slab for freelist */ 2311 freelist = addr + (PAGE_SIZE << cachep->gfporder) - 2312 cachep->freelist_size; 2313 } 2314 2315 return freelist; 2316 } 2317 2318 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) 2319 { 2320 return ((freelist_idx_t *)page->freelist)[idx]; 2321 } 2322 2323 static inline void set_free_obj(struct page *page, 2324 unsigned int idx, freelist_idx_t val) 2325 { 2326 ((freelist_idx_t *)(page->freelist))[idx] = val; 2327 } 2328 2329 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) 2330 { 2331 #if DEBUG 2332 int i; 2333 2334 for (i = 0; i < cachep->num; i++) { 2335 void *objp = index_to_obj(cachep, page, i); 2336 2337 if (cachep->flags & SLAB_STORE_USER) 2338 *dbg_userword(cachep, objp) = NULL; 2339 2340 if (cachep->flags & SLAB_RED_ZONE) { 2341 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2342 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2343 } 2344 /* 2345 * Constructors are not allowed to allocate memory from the same 2346 * cache which they are a constructor for. Otherwise, deadlock. 2347 * They must also be threaded. 2348 */ 2349 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { 2350 kasan_unpoison_object_data(cachep, 2351 objp + obj_offset(cachep)); 2352 cachep->ctor(objp + obj_offset(cachep)); 2353 kasan_poison_object_data( 2354 cachep, objp + obj_offset(cachep)); 2355 } 2356 2357 if (cachep->flags & SLAB_RED_ZONE) { 2358 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2359 slab_error(cachep, "constructor overwrote the end of an object"); 2360 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2361 slab_error(cachep, "constructor overwrote the start of an object"); 2362 } 2363 /* need to poison the objs? 
*/ 2364 if (cachep->flags & SLAB_POISON) { 2365 poison_obj(cachep, objp, POISON_FREE); 2366 slab_kernel_map(cachep, objp, 0); 2367 } 2368 } 2369 #endif 2370 } 2371 2372 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2373 /* Hold information during a freelist initialization */ 2374 union freelist_init_state { 2375 struct { 2376 unsigned int pos; 2377 unsigned int *list; 2378 unsigned int count; 2379 }; 2380 struct rnd_state rnd_state; 2381 }; 2382 2383 /* 2384 * Initialize the state based on the randomization methode available. 2385 * return true if the pre-computed list is available, false otherwize. 2386 */ 2387 static bool freelist_state_initialize(union freelist_init_state *state, 2388 struct kmem_cache *cachep, 2389 unsigned int count) 2390 { 2391 bool ret; 2392 unsigned int rand; 2393 2394 /* Use best entropy available to define a random shift */ 2395 rand = get_random_int(); 2396 2397 /* Use a random state if the pre-computed list is not available */ 2398 if (!cachep->random_seq) { 2399 prandom_seed_state(&state->rnd_state, rand); 2400 ret = false; 2401 } else { 2402 state->list = cachep->random_seq; 2403 state->count = count; 2404 state->pos = rand % count; 2405 ret = true; 2406 } 2407 return ret; 2408 } 2409 2410 /* Get the next entry on the list and randomize it using a random shift */ 2411 static freelist_idx_t next_random_slot(union freelist_init_state *state) 2412 { 2413 if (state->pos >= state->count) 2414 state->pos = 0; 2415 return state->list[state->pos++]; 2416 } 2417 2418 /* Swap two freelist entries */ 2419 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b) 2420 { 2421 swap(((freelist_idx_t *)page->freelist)[a], 2422 ((freelist_idx_t *)page->freelist)[b]); 2423 } 2424 2425 /* 2426 * Shuffle the freelist initialization state based on pre-computed lists. 2427 * return true if the list was successfully shuffled, false otherwise. 2428 */ 2429 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) 2430 { 2431 unsigned int objfreelist = 0, i, rand, count = cachep->num; 2432 union freelist_init_state state; 2433 bool precomputed; 2434 2435 if (count < 2) 2436 return false; 2437 2438 precomputed = freelist_state_initialize(&state, cachep, count); 2439 2440 /* Take a random entry as the objfreelist */ 2441 if (OBJFREELIST_SLAB(cachep)) { 2442 if (!precomputed) 2443 objfreelist = count - 1; 2444 else 2445 objfreelist = next_random_slot(&state); 2446 page->freelist = index_to_obj(cachep, page, objfreelist) + 2447 obj_offset(cachep); 2448 count--; 2449 } 2450 2451 /* 2452 * On early boot, generate the list dynamically. 2453 * Later use a pre-computed list for speed. 
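	 *
	 * Illustrative example (editorial): for a slab with cachep->num == 4
	 * and no pre-computed list, the code below first writes the identity
	 * order {0, 1, 2, 3} and then applies a Fisher-Yates shuffle, so the
	 * freelist might end up as, say, {2, 0, 3, 1}.  Every object index
	 * still appears exactly once; only the hand-out order is randomized.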
2454 */ 2455 if (!precomputed) { 2456 for (i = 0; i < count; i++) 2457 set_free_obj(page, i, i); 2458 2459 /* Fisher-Yates shuffle */ 2460 for (i = count - 1; i > 0; i--) { 2461 rand = prandom_u32_state(&state.rnd_state); 2462 rand %= (i + 1); 2463 swap_free_obj(page, i, rand); 2464 } 2465 } else { 2466 for (i = 0; i < count; i++) 2467 set_free_obj(page, i, next_random_slot(&state)); 2468 } 2469 2470 if (OBJFREELIST_SLAB(cachep)) 2471 set_free_obj(page, cachep->num - 1, objfreelist); 2472 2473 return true; 2474 } 2475 #else 2476 static inline bool shuffle_freelist(struct kmem_cache *cachep, 2477 struct page *page) 2478 { 2479 return false; 2480 } 2481 #endif /* CONFIG_SLAB_FREELIST_RANDOM */ 2482 2483 static void cache_init_objs(struct kmem_cache *cachep, 2484 struct page *page) 2485 { 2486 int i; 2487 void *objp; 2488 bool shuffled; 2489 2490 cache_init_objs_debug(cachep, page); 2491 2492 /* Try to randomize the freelist if enabled */ 2493 shuffled = shuffle_freelist(cachep, page); 2494 2495 if (!shuffled && OBJFREELIST_SLAB(cachep)) { 2496 page->freelist = index_to_obj(cachep, page, cachep->num - 1) + 2497 obj_offset(cachep); 2498 } 2499 2500 for (i = 0; i < cachep->num; i++) { 2501 objp = index_to_obj(cachep, page, i); 2502 objp = kasan_init_slab_obj(cachep, objp); 2503 2504 /* constructor could break poison info */ 2505 if (DEBUG == 0 && cachep->ctor) { 2506 kasan_unpoison_object_data(cachep, objp); 2507 cachep->ctor(objp); 2508 kasan_poison_object_data(cachep, objp); 2509 } 2510 2511 if (!shuffled) 2512 set_free_obj(page, i, i); 2513 } 2514 } 2515 2516 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page) 2517 { 2518 void *objp; 2519 2520 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); 2521 page->active++; 2522 2523 return objp; 2524 } 2525 2526 static void slab_put_obj(struct kmem_cache *cachep, 2527 struct page *page, void *objp) 2528 { 2529 unsigned int objnr = obj_to_index(cachep, page, objp); 2530 #if DEBUG 2531 unsigned int i; 2532 2533 /* Verify double free bug */ 2534 for (i = page->active; i < cachep->num; i++) { 2535 if (get_free_obj(page, i) == objnr) { 2536 pr_err("slab: double free detected in cache '%s', objp %px\n", 2537 cachep->name, objp); 2538 BUG(); 2539 } 2540 } 2541 #endif 2542 page->active--; 2543 if (!page->freelist) 2544 page->freelist = objp + obj_offset(cachep); 2545 2546 set_free_obj(page, page->active, objnr); 2547 } 2548 2549 /* 2550 * Map pages beginning at addr to the given cache and slab. This is required 2551 * for the slab allocator to be able to lookup the cache and slab of a 2552 * virtual address for kfree, ksize, and slab debugging. 2553 */ 2554 static void slab_map_pages(struct kmem_cache *cache, struct page *page, 2555 void *freelist) 2556 { 2557 page->slab_cache = cache; 2558 page->freelist = freelist; 2559 } 2560 2561 /* 2562 * Grow (by 1) the number of slabs within a cache. This is called by 2563 * kmem_cache_alloc() when there are no active objs left in a cache. 2564 */ 2565 static struct page *cache_grow_begin(struct kmem_cache *cachep, 2566 gfp_t flags, int nodeid) 2567 { 2568 void *freelist; 2569 size_t offset; 2570 gfp_t local_flags; 2571 int page_node; 2572 struct kmem_cache_node *n; 2573 struct page *page; 2574 2575 /* 2576 * Be lazy and only check for valid flags here, keeping it out of the 2577 * critical path in kmem_cache_alloc(). 
2578 */ 2579 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2580 flags = kmalloc_fix_flags(flags); 2581 2582 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 2583 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2584 2585 check_irq_off(); 2586 if (gfpflags_allow_blocking(local_flags)) 2587 local_irq_enable(); 2588 2589 /* 2590 * Get mem for the objs. Attempt to allocate a physical page from 2591 * 'nodeid'. 2592 */ 2593 page = kmem_getpages(cachep, local_flags, nodeid); 2594 if (!page) 2595 goto failed; 2596 2597 page_node = page_to_nid(page); 2598 n = get_node(cachep, page_node); 2599 2600 /* Get colour for the slab, and cal the next value. */ 2601 n->colour_next++; 2602 if (n->colour_next >= cachep->colour) 2603 n->colour_next = 0; 2604 2605 offset = n->colour_next; 2606 if (offset >= cachep->colour) 2607 offset = 0; 2608 2609 offset *= cachep->colour_off; 2610 2611 /* 2612 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so 2613 * page_address() in the latter returns a non-tagged pointer, 2614 * as it should be for slab pages. 2615 */ 2616 kasan_poison_slab(page); 2617 2618 /* Get slab management. */ 2619 freelist = alloc_slabmgmt(cachep, page, offset, 2620 local_flags & ~GFP_CONSTRAINT_MASK, page_node); 2621 if (OFF_SLAB(cachep) && !freelist) 2622 goto opps1; 2623 2624 slab_map_pages(cachep, page, freelist); 2625 2626 cache_init_objs(cachep, page); 2627 2628 if (gfpflags_allow_blocking(local_flags)) 2629 local_irq_disable(); 2630 2631 return page; 2632 2633 opps1: 2634 kmem_freepages(cachep, page); 2635 failed: 2636 if (gfpflags_allow_blocking(local_flags)) 2637 local_irq_disable(); 2638 return NULL; 2639 } 2640 2641 static void cache_grow_end(struct kmem_cache *cachep, struct page *page) 2642 { 2643 struct kmem_cache_node *n; 2644 void *list = NULL; 2645 2646 check_irq_off(); 2647 2648 if (!page) 2649 return; 2650 2651 INIT_LIST_HEAD(&page->slab_list); 2652 n = get_node(cachep, page_to_nid(page)); 2653 2654 spin_lock(&n->list_lock); 2655 n->total_slabs++; 2656 if (!page->active) { 2657 list_add_tail(&page->slab_list, &n->slabs_free); 2658 n->free_slabs++; 2659 } else 2660 fixup_slab_list(cachep, n, page, &list); 2661 2662 STATS_INC_GROWN(cachep); 2663 n->free_objects += cachep->num - page->active; 2664 spin_unlock(&n->list_lock); 2665 2666 fixup_objfreelist_debug(cachep, &list); 2667 } 2668 2669 #if DEBUG 2670 2671 /* 2672 * Perform extra freeing checks: 2673 * - detect bad pointers. 2674 * - POISON/RED_ZONE checking 2675 */ 2676 static void kfree_debugcheck(const void *objp) 2677 { 2678 if (!virt_addr_valid(objp)) { 2679 pr_err("kfree_debugcheck: out of range ptr %lxh\n", 2680 (unsigned long)objp); 2681 BUG(); 2682 } 2683 } 2684 2685 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2686 { 2687 unsigned long long redzone1, redzone2; 2688 2689 redzone1 = *dbg_redzone1(cache, obj); 2690 redzone2 = *dbg_redzone2(cache, obj); 2691 2692 /* 2693 * Redzone is ok. 
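	 *
	 * (Editorial note: both redzone words of a live object hold
	 * RED_ACTIVE and are reset to RED_INACTIVE on free.  So at free time
	 * ACTIVE/ACTIVE is the expected state, INACTIVE/INACTIVE means the
	 * object is being freed a second time, and any other combination
	 * means memory just outside the object was overwritten.)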
2694 */ 2695 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2696 return; 2697 2698 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2699 slab_error(cache, "double free detected"); 2700 else 2701 slab_error(cache, "memory outside object was overwritten"); 2702 2703 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 2704 obj, redzone1, redzone2); 2705 } 2706 2707 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2708 unsigned long caller) 2709 { 2710 unsigned int objnr; 2711 struct page *page; 2712 2713 BUG_ON(virt_to_cache(objp) != cachep); 2714 2715 objp -= obj_offset(cachep); 2716 kfree_debugcheck(objp); 2717 page = virt_to_head_page(objp); 2718 2719 if (cachep->flags & SLAB_RED_ZONE) { 2720 verify_redzone_free(cachep, objp); 2721 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2722 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2723 } 2724 if (cachep->flags & SLAB_STORE_USER) 2725 *dbg_userword(cachep, objp) = (void *)caller; 2726 2727 objnr = obj_to_index(cachep, page, objp); 2728 2729 BUG_ON(objnr >= cachep->num); 2730 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2731 2732 if (cachep->flags & SLAB_POISON) { 2733 poison_obj(cachep, objp, POISON_FREE); 2734 slab_kernel_map(cachep, objp, 0); 2735 } 2736 return objp; 2737 } 2738 2739 #else 2740 #define kfree_debugcheck(x) do { } while(0) 2741 #define cache_free_debugcheck(x,objp,z) (objp) 2742 #endif 2743 2744 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, 2745 void **list) 2746 { 2747 #if DEBUG 2748 void *next = *list; 2749 void *objp; 2750 2751 while (next) { 2752 objp = next - obj_offset(cachep); 2753 next = *(void **)next; 2754 poison_obj(cachep, objp, POISON_FREE); 2755 } 2756 #endif 2757 } 2758 2759 static inline void fixup_slab_list(struct kmem_cache *cachep, 2760 struct kmem_cache_node *n, struct page *page, 2761 void **list) 2762 { 2763 /* move slabp to correct slabp list: */ 2764 list_del(&page->slab_list); 2765 if (page->active == cachep->num) { 2766 list_add(&page->slab_list, &n->slabs_full); 2767 if (OBJFREELIST_SLAB(cachep)) { 2768 #if DEBUG 2769 /* Poisoning will be done without holding the lock */ 2770 if (cachep->flags & SLAB_POISON) { 2771 void **objp = page->freelist; 2772 2773 *objp = *list; 2774 *list = objp; 2775 } 2776 #endif 2777 page->freelist = NULL; 2778 } 2779 } else 2780 list_add(&page->slab_list, &n->slabs_partial); 2781 } 2782 2783 /* Try to find non-pfmemalloc slab if needed */ 2784 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n, 2785 struct page *page, bool pfmemalloc) 2786 { 2787 if (!page) 2788 return NULL; 2789 2790 if (pfmemalloc) 2791 return page; 2792 2793 if (!PageSlabPfmemalloc(page)) 2794 return page; 2795 2796 /* No need to keep pfmemalloc slab if we have enough free objects */ 2797 if (n->free_objects > n->free_limit) { 2798 ClearPageSlabPfmemalloc(page); 2799 return page; 2800 } 2801 2802 /* Move pfmemalloc slab to the end of list to speed up next search */ 2803 list_del(&page->slab_list); 2804 if (!page->active) { 2805 list_add_tail(&page->slab_list, &n->slabs_free); 2806 n->free_slabs++; 2807 } else 2808 list_add_tail(&page->slab_list, &n->slabs_partial); 2809 2810 list_for_each_entry(page, &n->slabs_partial, slab_list) { 2811 if (!PageSlabPfmemalloc(page)) 2812 return page; 2813 } 2814 2815 n->free_touched = 1; 2816 list_for_each_entry(page, &n->slabs_free, slab_list) { 2817 if (!PageSlabPfmemalloc(page)) { 2818 n->free_slabs--; 2819 return page; 2820 } 2821 } 2822 2823 return NULL; 2824 } 2825 2826 
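/*
 * Editorial sketch (not part of the original source): callers that need
 * "some slab with a free object" use get_first_slab() below, which prefers
 * a partial slab and only then falls back to a completely free one.  A
 * simplified caller, with the node's list_lock held as the function
 * asserts, might look like:
 *
 *	spin_lock(&n->list_lock);
 *	page = get_first_slab(n, false);
 *	if (page)
 *		obj = slab_get_obj(cachep, page);
 *	spin_unlock(&n->list_lock);
 *
 * (Real callers additionally adjust n->free_objects and call
 * fixup_slab_list(); see cache_alloc_refill() and ____cache_alloc_node().)
 * Preferring partial slabs keeps objects packed, so empty slabs can be
 * reaped and their pages returned to the page allocator.
 */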
static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) 2827 { 2828 struct page *page; 2829 2830 assert_spin_locked(&n->list_lock); 2831 page = list_first_entry_or_null(&n->slabs_partial, struct page, 2832 slab_list); 2833 if (!page) { 2834 n->free_touched = 1; 2835 page = list_first_entry_or_null(&n->slabs_free, struct page, 2836 slab_list); 2837 if (page) 2838 n->free_slabs--; 2839 } 2840 2841 if (sk_memalloc_socks()) 2842 page = get_valid_first_slab(n, page, pfmemalloc); 2843 2844 return page; 2845 } 2846 2847 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, 2848 struct kmem_cache_node *n, gfp_t flags) 2849 { 2850 struct page *page; 2851 void *obj; 2852 void *list = NULL; 2853 2854 if (!gfp_pfmemalloc_allowed(flags)) 2855 return NULL; 2856 2857 spin_lock(&n->list_lock); 2858 page = get_first_slab(n, true); 2859 if (!page) { 2860 spin_unlock(&n->list_lock); 2861 return NULL; 2862 } 2863 2864 obj = slab_get_obj(cachep, page); 2865 n->free_objects--; 2866 2867 fixup_slab_list(cachep, n, page, &list); 2868 2869 spin_unlock(&n->list_lock); 2870 fixup_objfreelist_debug(cachep, &list); 2871 2872 return obj; 2873 } 2874 2875 /* 2876 * Slab list should be fixed up by fixup_slab_list() for existing slab 2877 * or cache_grow_end() for new slab 2878 */ 2879 static __always_inline int alloc_block(struct kmem_cache *cachep, 2880 struct array_cache *ac, struct page *page, int batchcount) 2881 { 2882 /* 2883 * There must be at least one object available for 2884 * allocation. 2885 */ 2886 BUG_ON(page->active >= cachep->num); 2887 2888 while (page->active < cachep->num && batchcount--) { 2889 STATS_INC_ALLOCED(cachep); 2890 STATS_INC_ACTIVE(cachep); 2891 STATS_SET_HIGH(cachep); 2892 2893 ac->entry[ac->avail++] = slab_get_obj(cachep, page); 2894 } 2895 2896 return batchcount; 2897 } 2898 2899 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2900 { 2901 int batchcount; 2902 struct kmem_cache_node *n; 2903 struct array_cache *ac, *shared; 2904 int node; 2905 void *list = NULL; 2906 struct page *page; 2907 2908 check_irq_off(); 2909 node = numa_mem_id(); 2910 2911 ac = cpu_cache_get(cachep); 2912 batchcount = ac->batchcount; 2913 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2914 /* 2915 * If there was little recent activity on this cache, then 2916 * perform only a partial refill. Otherwise we could generate 2917 * refill bouncing. 2918 */ 2919 batchcount = BATCHREFILL_LIMIT; 2920 } 2921 n = get_node(cachep, node); 2922 2923 BUG_ON(ac->avail > 0 || !n); 2924 shared = READ_ONCE(n->shared); 2925 if (!n->free_objects && (!shared || !shared->avail)) 2926 goto direct_grow; 2927 2928 spin_lock(&n->list_lock); 2929 shared = READ_ONCE(n->shared); 2930 2931 /* See if we can refill from the shared array */ 2932 if (shared && transfer_objects(ac, shared, batchcount)) { 2933 shared->touched = 1; 2934 goto alloc_done; 2935 } 2936 2937 while (batchcount > 0) { 2938 /* Get slab alloc is to come from. 
*/ 2939 page = get_first_slab(n, false); 2940 if (!page) 2941 goto must_grow; 2942 2943 check_spinlock_acquired(cachep); 2944 2945 batchcount = alloc_block(cachep, ac, page, batchcount); 2946 fixup_slab_list(cachep, n, page, &list); 2947 } 2948 2949 must_grow: 2950 n->free_objects -= ac->avail; 2951 alloc_done: 2952 spin_unlock(&n->list_lock); 2953 fixup_objfreelist_debug(cachep, &list); 2954 2955 direct_grow: 2956 if (unlikely(!ac->avail)) { 2957 /* Check if we can use obj in pfmemalloc slab */ 2958 if (sk_memalloc_socks()) { 2959 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); 2960 2961 if (obj) 2962 return obj; 2963 } 2964 2965 page = cache_grow_begin(cachep, gfp_exact_node(flags), node); 2966 2967 /* 2968 * cache_grow_begin() can reenable interrupts, 2969 * then ac could change. 2970 */ 2971 ac = cpu_cache_get(cachep); 2972 if (!ac->avail && page) 2973 alloc_block(cachep, ac, page, batchcount); 2974 cache_grow_end(cachep, page); 2975 2976 if (!ac->avail) 2977 return NULL; 2978 } 2979 ac->touched = 1; 2980 2981 return ac->entry[--ac->avail]; 2982 } 2983 2984 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 2985 gfp_t flags) 2986 { 2987 might_sleep_if(gfpflags_allow_blocking(flags)); 2988 } 2989 2990 #if DEBUG 2991 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2992 gfp_t flags, void *objp, unsigned long caller) 2993 { 2994 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); 2995 if (!objp) 2996 return objp; 2997 if (cachep->flags & SLAB_POISON) { 2998 check_poison_obj(cachep, objp); 2999 slab_kernel_map(cachep, objp, 1); 3000 poison_obj(cachep, objp, POISON_INUSE); 3001 } 3002 if (cachep->flags & SLAB_STORE_USER) 3003 *dbg_userword(cachep, objp) = (void *)caller; 3004 3005 if (cachep->flags & SLAB_RED_ZONE) { 3006 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3007 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3008 slab_error(cachep, "double free, or memory outside object was overwritten"); 3009 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", 3010 objp, *dbg_redzone1(cachep, objp), 3011 *dbg_redzone2(cachep, objp)); 3012 } 3013 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3014 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3015 } 3016 3017 objp += obj_offset(cachep); 3018 if (cachep->ctor && cachep->flags & SLAB_POISON) 3019 cachep->ctor(objp); 3020 if (ARCH_SLAB_MINALIGN && 3021 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 3022 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3023 objp, (int)ARCH_SLAB_MINALIGN); 3024 } 3025 return objp; 3026 } 3027 #else 3028 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3029 #endif 3030 3031 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3032 { 3033 void *objp; 3034 struct array_cache *ac; 3035 3036 check_irq_off(); 3037 3038 ac = cpu_cache_get(cachep); 3039 if (likely(ac->avail)) { 3040 ac->touched = 1; 3041 objp = ac->entry[--ac->avail]; 3042 3043 STATS_INC_ALLOCHIT(cachep); 3044 goto out; 3045 } 3046 3047 STATS_INC_ALLOCMISS(cachep); 3048 objp = cache_alloc_refill(cachep, flags); 3049 /* 3050 * the 'ac' may be updated by cache_alloc_refill(), 3051 * and kmemleak_erase() requires its correct value. 3052 */ 3053 ac = cpu_cache_get(cachep); 3054 3055 out: 3056 /* 3057 * To avoid a false negative, if an object that is in one of the 3058 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 3059 * treat the array pointers as a reference to the object. 
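	 *
	 * (Editorial note: after the LIFO pop above, the pointer just handed
	 * out still sits in the stale slot ac->entry[ac->avail];
	 * kmemleak_erase() below clears that slot so a scan of the per-cpu
	 * array no longer finds a reference to the object and a genuine leak
	 * is not masked.)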
3060 */ 3061 if (objp) 3062 kmemleak_erase(&ac->entry[ac->avail]); 3063 return objp; 3064 } 3065 3066 #ifdef CONFIG_NUMA 3067 /* 3068 * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set. 3069 * 3070 * If we are in_interrupt, then process context, including cpusets and 3071 * mempolicy, may not apply and should not be used for allocation policy. 3072 */ 3073 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3074 { 3075 int nid_alloc, nid_here; 3076 3077 if (in_interrupt() || (flags & __GFP_THISNODE)) 3078 return NULL; 3079 nid_alloc = nid_here = numa_mem_id(); 3080 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3081 nid_alloc = cpuset_slab_spread_node(); 3082 else if (current->mempolicy) 3083 nid_alloc = mempolicy_slab_node(); 3084 if (nid_alloc != nid_here) 3085 return ____cache_alloc_node(cachep, flags, nid_alloc); 3086 return NULL; 3087 } 3088 3089 /* 3090 * Fallback function if there was no memory available and no objects on a 3091 * certain node and fall back is permitted. First we scan all the 3092 * available node for available objects. If that fails then we 3093 * perform an allocation without specifying a node. This allows the page 3094 * allocator to do its reclaim / fallback magic. We then insert the 3095 * slab into the proper nodelist and then allocate from it. 3096 */ 3097 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3098 { 3099 struct zonelist *zonelist; 3100 struct zoneref *z; 3101 struct zone *zone; 3102 enum zone_type highest_zoneidx = gfp_zone(flags); 3103 void *obj = NULL; 3104 struct page *page; 3105 int nid; 3106 unsigned int cpuset_mems_cookie; 3107 3108 if (flags & __GFP_THISNODE) 3109 return NULL; 3110 3111 retry_cpuset: 3112 cpuset_mems_cookie = read_mems_allowed_begin(); 3113 zonelist = node_zonelist(mempolicy_slab_node(), flags); 3114 3115 retry: 3116 /* 3117 * Look through allowed nodes for objects available 3118 * from existing per node queues. 3119 */ 3120 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 3121 nid = zone_to_nid(zone); 3122 3123 if (cpuset_zone_allowed(zone, flags) && 3124 get_node(cache, nid) && 3125 get_node(cache, nid)->free_objects) { 3126 obj = ____cache_alloc_node(cache, 3127 gfp_exact_node(flags), nid); 3128 if (obj) 3129 break; 3130 } 3131 } 3132 3133 if (!obj) { 3134 /* 3135 * This allocation will be performed within the constraints 3136 * of the current cpuset / memory policy requirements. 3137 * We may trigger various forms of reclaim on the allowed 3138 * set and go into memory reserves if necessary. 3139 */ 3140 page = cache_grow_begin(cache, flags, numa_mem_id()); 3141 cache_grow_end(cache, page); 3142 if (page) { 3143 nid = page_to_nid(page); 3144 obj = ____cache_alloc_node(cache, 3145 gfp_exact_node(flags), nid); 3146 3147 /* 3148 * Another processor may allocate the objects in 3149 * the slab since we are not holding any locks. 
3150 */ 3151 if (!obj) 3152 goto retry; 3153 } 3154 } 3155 3156 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) 3157 goto retry_cpuset; 3158 return obj; 3159 } 3160 3161 /* 3162 * A interface to enable slab creation on nodeid 3163 */ 3164 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3165 int nodeid) 3166 { 3167 struct page *page; 3168 struct kmem_cache_node *n; 3169 void *obj = NULL; 3170 void *list = NULL; 3171 3172 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); 3173 n = get_node(cachep, nodeid); 3174 BUG_ON(!n); 3175 3176 check_irq_off(); 3177 spin_lock(&n->list_lock); 3178 page = get_first_slab(n, false); 3179 if (!page) 3180 goto must_grow; 3181 3182 check_spinlock_acquired_node(cachep, nodeid); 3183 3184 STATS_INC_NODEALLOCS(cachep); 3185 STATS_INC_ACTIVE(cachep); 3186 STATS_SET_HIGH(cachep); 3187 3188 BUG_ON(page->active == cachep->num); 3189 3190 obj = slab_get_obj(cachep, page); 3191 n->free_objects--; 3192 3193 fixup_slab_list(cachep, n, page, &list); 3194 3195 spin_unlock(&n->list_lock); 3196 fixup_objfreelist_debug(cachep, &list); 3197 return obj; 3198 3199 must_grow: 3200 spin_unlock(&n->list_lock); 3201 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); 3202 if (page) { 3203 /* This slab isn't counted yet so don't update free_objects */ 3204 obj = slab_get_obj(cachep, page); 3205 } 3206 cache_grow_end(cachep, page); 3207 3208 return obj ? obj : fallback_alloc(cachep, flags); 3209 } 3210 3211 static __always_inline void * 3212 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3213 unsigned long caller) 3214 { 3215 unsigned long save_flags; 3216 void *ptr; 3217 int slab_node = numa_mem_id(); 3218 struct obj_cgroup *objcg = NULL; 3219 3220 flags &= gfp_allowed_mask; 3221 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); 3222 if (unlikely(!cachep)) 3223 return NULL; 3224 3225 cache_alloc_debugcheck_before(cachep, flags); 3226 local_irq_save(save_flags); 3227 3228 if (nodeid == NUMA_NO_NODE) 3229 nodeid = slab_node; 3230 3231 if (unlikely(!get_node(cachep, nodeid))) { 3232 /* Node not bootstrapped yet */ 3233 ptr = fallback_alloc(cachep, flags); 3234 goto out; 3235 } 3236 3237 if (nodeid == slab_node) { 3238 /* 3239 * Use the locally cached objects if possible. 3240 * However ____cache_alloc does not allow fallback 3241 * to other nodes. It may fail while we still have 3242 * objects on other nodes available. 3243 */ 3244 ptr = ____cache_alloc(cachep, flags); 3245 if (ptr) 3246 goto out; 3247 } 3248 /* ___cache_alloc_node can fall back to other nodes */ 3249 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3250 out: 3251 local_irq_restore(save_flags); 3252 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3253 3254 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr) 3255 memset(ptr, 0, cachep->object_size); 3256 3257 slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr); 3258 return ptr; 3259 } 3260 3261 static __always_inline void * 3262 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3263 { 3264 void *objp; 3265 3266 if (current->mempolicy || cpuset_do_slab_mem_spread()) { 3267 objp = alternate_node_alloc(cache, flags); 3268 if (objp) 3269 goto out; 3270 } 3271 objp = ____cache_alloc(cache, flags); 3272 3273 /* 3274 * We may just have run out of memory on the local node. 
3275 * ____cache_alloc_node() knows how to locate memory on other nodes 3276 */ 3277 if (!objp) 3278 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); 3279 3280 out: 3281 return objp; 3282 } 3283 #else 3284 3285 static __always_inline void * 3286 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3287 { 3288 return ____cache_alloc(cachep, flags); 3289 } 3290 3291 #endif /* CONFIG_NUMA */ 3292 3293 static __always_inline void * 3294 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3295 { 3296 unsigned long save_flags; 3297 void *objp; 3298 struct obj_cgroup *objcg = NULL; 3299 3300 flags &= gfp_allowed_mask; 3301 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); 3302 if (unlikely(!cachep)) 3303 return NULL; 3304 3305 cache_alloc_debugcheck_before(cachep, flags); 3306 local_irq_save(save_flags); 3307 objp = __do_cache_alloc(cachep, flags); 3308 local_irq_restore(save_flags); 3309 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3310 prefetchw(objp); 3311 3312 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp) 3313 memset(objp, 0, cachep->object_size); 3314 3315 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp); 3316 return objp; 3317 } 3318 3319 /* 3320 * Caller needs to acquire correct kmem_cache_node's list_lock 3321 * @list: List of detached free slabs should be freed by caller 3322 */ 3323 static void free_block(struct kmem_cache *cachep, void **objpp, 3324 int nr_objects, int node, struct list_head *list) 3325 { 3326 int i; 3327 struct kmem_cache_node *n = get_node(cachep, node); 3328 struct page *page; 3329 3330 n->free_objects += nr_objects; 3331 3332 for (i = 0; i < nr_objects; i++) { 3333 void *objp; 3334 struct page *page; 3335 3336 objp = objpp[i]; 3337 3338 page = virt_to_head_page(objp); 3339 list_del(&page->slab_list); 3340 check_spinlock_acquired_node(cachep, node); 3341 slab_put_obj(cachep, page, objp); 3342 STATS_DEC_ACTIVE(cachep); 3343 3344 /* fixup slab chains */ 3345 if (page->active == 0) { 3346 list_add(&page->slab_list, &n->slabs_free); 3347 n->free_slabs++; 3348 } else { 3349 /* Unconditionally move a slab to the end of the 3350 * partial list on free - maximum time for the 3351 * other objects to be freed, too. 
3352 */ 3353 list_add_tail(&page->slab_list, &n->slabs_partial); 3354 } 3355 } 3356 3357 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { 3358 n->free_objects -= cachep->num; 3359 3360 page = list_last_entry(&n->slabs_free, struct page, slab_list); 3361 list_move(&page->slab_list, list); 3362 n->free_slabs--; 3363 n->total_slabs--; 3364 } 3365 } 3366 3367 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3368 { 3369 int batchcount; 3370 struct kmem_cache_node *n; 3371 int node = numa_mem_id(); 3372 LIST_HEAD(list); 3373 3374 batchcount = ac->batchcount; 3375 3376 check_irq_off(); 3377 n = get_node(cachep, node); 3378 spin_lock(&n->list_lock); 3379 if (n->shared) { 3380 struct array_cache *shared_array = n->shared; 3381 int max = shared_array->limit - shared_array->avail; 3382 if (max) { 3383 if (batchcount > max) 3384 batchcount = max; 3385 memcpy(&(shared_array->entry[shared_array->avail]), 3386 ac->entry, sizeof(void *) * batchcount); 3387 shared_array->avail += batchcount; 3388 goto free_done; 3389 } 3390 } 3391 3392 free_block(cachep, ac->entry, batchcount, node, &list); 3393 free_done: 3394 #if STATS 3395 { 3396 int i = 0; 3397 struct page *page; 3398 3399 list_for_each_entry(page, &n->slabs_free, slab_list) { 3400 BUG_ON(page->active); 3401 3402 i++; 3403 } 3404 STATS_SET_FREEABLE(cachep, i); 3405 } 3406 #endif 3407 spin_unlock(&n->list_lock); 3408 ac->avail -= batchcount; 3409 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3410 slabs_destroy(cachep, &list); 3411 } 3412 3413 /* 3414 * Release an obj back to its cache. If the obj has a constructed state, it must 3415 * be in this state _before_ it is released. Called with disabled ints. 3416 */ 3417 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, 3418 unsigned long caller) 3419 { 3420 if (unlikely(slab_want_init_on_free(cachep))) 3421 memset(objp, 0, cachep->object_size); 3422 3423 /* Put the object into the quarantine, don't touch it for now. */ 3424 if (kasan_slab_free(cachep, objp, _RET_IP_)) 3425 return; 3426 3427 /* Use KCSAN to help debug racy use-after-free. */ 3428 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU)) 3429 __kcsan_check_access(objp, cachep->object_size, 3430 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 3431 3432 ___cache_free(cachep, objp, caller); 3433 } 3434 3435 void ___cache_free(struct kmem_cache *cachep, void *objp, 3436 unsigned long caller) 3437 { 3438 struct array_cache *ac = cpu_cache_get(cachep); 3439 3440 check_irq_off(); 3441 kmemleak_free_recursive(objp, cachep->flags); 3442 objp = cache_free_debugcheck(cachep, objp, caller); 3443 memcg_slab_free_hook(cachep, &objp, 1); 3444 3445 /* 3446 * Skip calling cache_free_alien() when the platform is not numa. 3447 * This will avoid cache misses that happen while accessing slabp (which 3448 * is per page memory reference) to get nodeid. Instead use a global 3449 * variable to skip the call, which is mostly likely to be present in 3450 * the cache. 
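	 *
	 * (Editorial note: the global in question is nr_online_nodes, tested
	 * below.  On a single-node system the cache_free_alien() call, and
	 * with it the page lookup needed to find the object's node, is
	 * skipped entirely, keeping the free fast path to the per-cpu array.)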
3451 */ 3452 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) 3453 return; 3454 3455 if (ac->avail < ac->limit) { 3456 STATS_INC_FREEHIT(cachep); 3457 } else { 3458 STATS_INC_FREEMISS(cachep); 3459 cache_flusharray(cachep, ac); 3460 } 3461 3462 if (sk_memalloc_socks()) { 3463 struct page *page = virt_to_head_page(objp); 3464 3465 if (unlikely(PageSlabPfmemalloc(page))) { 3466 cache_free_pfmemalloc(cachep, page, objp); 3467 return; 3468 } 3469 } 3470 3471 __free_one(ac, objp); 3472 } 3473 3474 /** 3475 * kmem_cache_alloc - Allocate an object 3476 * @cachep: The cache to allocate from. 3477 * @flags: See kmalloc(). 3478 * 3479 * Allocate an object from this cache. The flags are only relevant 3480 * if the cache has no available objects. 3481 * 3482 * Return: pointer to the new object or %NULL in case of error 3483 */ 3484 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3485 { 3486 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3487 3488 trace_kmem_cache_alloc(_RET_IP_, ret, 3489 cachep->object_size, cachep->size, flags); 3490 3491 return ret; 3492 } 3493 EXPORT_SYMBOL(kmem_cache_alloc); 3494 3495 static __always_inline void 3496 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, 3497 size_t size, void **p, unsigned long caller) 3498 { 3499 size_t i; 3500 3501 for (i = 0; i < size; i++) 3502 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); 3503 } 3504 3505 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3506 void **p) 3507 { 3508 size_t i; 3509 struct obj_cgroup *objcg = NULL; 3510 3511 s = slab_pre_alloc_hook(s, &objcg, size, flags); 3512 if (!s) 3513 return 0; 3514 3515 cache_alloc_debugcheck_before(s, flags); 3516 3517 local_irq_disable(); 3518 for (i = 0; i < size; i++) { 3519 void *objp = __do_cache_alloc(s, flags); 3520 3521 if (unlikely(!objp)) 3522 goto error; 3523 p[i] = objp; 3524 } 3525 local_irq_enable(); 3526 3527 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); 3528 3529 /* Clear memory outside IRQ disabled section */ 3530 if (unlikely(slab_want_init_on_alloc(flags, s))) 3531 for (i = 0; i < size; i++) 3532 memset(p[i], 0, s->object_size); 3533 3534 slab_post_alloc_hook(s, objcg, flags, size, p); 3535 /* FIXME: Trace call missing. Christoph would like a bulk variant */ 3536 return size; 3537 error: 3538 local_irq_enable(); 3539 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); 3540 slab_post_alloc_hook(s, objcg, flags, i, p); 3541 __kmem_cache_free_bulk(s, i, p); 3542 return 0; 3543 } 3544 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3545 3546 #ifdef CONFIG_TRACING 3547 void * 3548 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3549 { 3550 void *ret; 3551 3552 ret = slab_alloc(cachep, flags, _RET_IP_); 3553 3554 ret = kasan_kmalloc(cachep, ret, size, flags); 3555 trace_kmalloc(_RET_IP_, ret, 3556 size, cachep->size, flags); 3557 return ret; 3558 } 3559 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3560 #endif 3561 3562 #ifdef CONFIG_NUMA 3563 /** 3564 * kmem_cache_alloc_node - Allocate an object on the specified node 3565 * @cachep: The cache to allocate from. 3566 * @flags: See kmalloc(). 3567 * @nodeid: node number of the target node. 3568 * 3569 * Identical to kmem_cache_alloc but it will allocate memory on the given 3570 * node, which can improve the performance for cpu bound structures. 3571 * 3572 * Fallback to other node is possible if __GFP_THISNODE is not set. 
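 *
 * Illustrative usage (editorial sketch; "struct foo", "my_cache" and "nid"
 * are hypothetical and stand for an object type, a cache created elsewhere
 * and the preferred node):
 *
 *	struct foo *f = kmem_cache_alloc_node(my_cache, GFP_KERNEL, nid);
 *
 *	if (!f)
 *		return -ENOMEM;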
3573 * 3574 * Return: pointer to the new object or %NULL in case of error 3575 */ 3576 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3577 { 3578 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3579 3580 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3581 cachep->object_size, cachep->size, 3582 flags, nodeid); 3583 3584 return ret; 3585 } 3586 EXPORT_SYMBOL(kmem_cache_alloc_node); 3587 3588 #ifdef CONFIG_TRACING 3589 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3590 gfp_t flags, 3591 int nodeid, 3592 size_t size) 3593 { 3594 void *ret; 3595 3596 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3597 3598 ret = kasan_kmalloc(cachep, ret, size, flags); 3599 trace_kmalloc_node(_RET_IP_, ret, 3600 size, cachep->size, 3601 flags, nodeid); 3602 return ret; 3603 } 3604 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3605 #endif 3606 3607 static __always_inline void * 3608 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3609 { 3610 struct kmem_cache *cachep; 3611 void *ret; 3612 3613 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 3614 return NULL; 3615 cachep = kmalloc_slab(size, flags); 3616 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3617 return cachep; 3618 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); 3619 ret = kasan_kmalloc(cachep, ret, size, flags); 3620 3621 return ret; 3622 } 3623 3624 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3625 { 3626 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3627 } 3628 EXPORT_SYMBOL(__kmalloc_node); 3629 3630 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3631 int node, unsigned long caller) 3632 { 3633 return __do_kmalloc_node(size, flags, node, caller); 3634 } 3635 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3636 #endif /* CONFIG_NUMA */ 3637 3638 /** 3639 * __do_kmalloc - allocate memory 3640 * @size: how many bytes of memory are required. 3641 * @flags: the type of memory to allocate (see kmalloc). 3642 * @caller: function caller for debug tracking of the caller 3643 * 3644 * Return: pointer to the allocated memory or %NULL in case of error 3645 */ 3646 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3647 unsigned long caller) 3648 { 3649 struct kmem_cache *cachep; 3650 void *ret; 3651 3652 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) 3653 return NULL; 3654 cachep = kmalloc_slab(size, flags); 3655 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3656 return cachep; 3657 ret = slab_alloc(cachep, flags, caller); 3658 3659 ret = kasan_kmalloc(cachep, ret, size, flags); 3660 trace_kmalloc(caller, ret, 3661 size, cachep->size, flags); 3662 3663 return ret; 3664 } 3665 3666 void *__kmalloc(size_t size, gfp_t flags) 3667 { 3668 return __do_kmalloc(size, flags, _RET_IP_); 3669 } 3670 EXPORT_SYMBOL(__kmalloc); 3671 3672 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3673 { 3674 return __do_kmalloc(size, flags, caller); 3675 } 3676 EXPORT_SYMBOL(__kmalloc_track_caller); 3677 3678 /** 3679 * kmem_cache_free - Deallocate an object 3680 * @cachep: The cache the allocation was from. 3681 * @objp: The previously allocated object. 3682 * 3683 * Free an object which was previously allocated from this 3684 * cache. 
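 *
 * Minimal illustrative round trip (editorial sketch; the names are
 * hypothetical):
 *
 *	cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				   0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(cachep, obj);
 *	kmem_cache_destroy(cachep);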
3685 */ 3686 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3687 { 3688 unsigned long flags; 3689 cachep = cache_from_obj(cachep, objp); 3690 if (!cachep) 3691 return; 3692 3693 local_irq_save(flags); 3694 debug_check_no_locks_freed(objp, cachep->object_size); 3695 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3696 debug_check_no_obj_freed(objp, cachep->object_size); 3697 __cache_free(cachep, objp, _RET_IP_); 3698 local_irq_restore(flags); 3699 3700 trace_kmem_cache_free(_RET_IP_, objp); 3701 } 3702 EXPORT_SYMBOL(kmem_cache_free); 3703 3704 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) 3705 { 3706 struct kmem_cache *s; 3707 size_t i; 3708 3709 local_irq_disable(); 3710 for (i = 0; i < size; i++) { 3711 void *objp = p[i]; 3712 3713 if (!orig_s) /* called via kfree_bulk */ 3714 s = virt_to_cache(objp); 3715 else 3716 s = cache_from_obj(orig_s, objp); 3717 if (!s) 3718 continue; 3719 3720 debug_check_no_locks_freed(objp, s->object_size); 3721 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 3722 debug_check_no_obj_freed(objp, s->object_size); 3723 3724 __cache_free(s, objp, _RET_IP_); 3725 } 3726 local_irq_enable(); 3727 3728 /* FIXME: add tracing */ 3729 } 3730 EXPORT_SYMBOL(kmem_cache_free_bulk); 3731 3732 /** 3733 * kfree - free previously allocated memory 3734 * @objp: pointer returned by kmalloc. 3735 * 3736 * If @objp is NULL, no operation is performed. 3737 * 3738 * Don't free memory not originally allocated by kmalloc() 3739 * or you will run into trouble. 3740 */ 3741 void kfree(const void *objp) 3742 { 3743 struct kmem_cache *c; 3744 unsigned long flags; 3745 3746 trace_kfree(_RET_IP_, objp); 3747 3748 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3749 return; 3750 local_irq_save(flags); 3751 kfree_debugcheck(objp); 3752 c = virt_to_cache(objp); 3753 if (!c) { 3754 local_irq_restore(flags); 3755 return; 3756 } 3757 debug_check_no_locks_freed(objp, c->object_size); 3758 3759 debug_check_no_obj_freed(objp, c->object_size); 3760 __cache_free(c, (void *)objp, _RET_IP_); 3761 local_irq_restore(flags); 3762 } 3763 EXPORT_SYMBOL(kfree); 3764 3765 /* 3766 * This initializes kmem_cache_node or resizes various caches for all nodes. 3767 */ 3768 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) 3769 { 3770 int ret; 3771 int node; 3772 struct kmem_cache_node *n; 3773 3774 for_each_online_node(node) { 3775 ret = setup_kmem_cache_node(cachep, node, gfp, true); 3776 if (ret) 3777 goto fail; 3778 3779 } 3780 3781 return 0; 3782 3783 fail: 3784 if (!cachep->list.next) { 3785 /* Cache is not active yet. Roll back what we did */ 3786 node--; 3787 while (node >= 0) { 3788 n = get_node(cachep, node); 3789 if (n) { 3790 kfree(n->shared); 3791 free_alien_cache(n->alien); 3792 kfree(n); 3793 cachep->node[node] = NULL; 3794 } 3795 node--; 3796 } 3797 } 3798 return -ENOMEM; 3799 } 3800 3801 /* Always called with the slab_mutex held */ 3802 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3803 int batchcount, int shared, gfp_t gfp) 3804 { 3805 struct array_cache __percpu *cpu_cache, *prev; 3806 int cpu; 3807 3808 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); 3809 if (!cpu_cache) 3810 return -ENOMEM; 3811 3812 prev = cachep->cpu_cache; 3813 cachep->cpu_cache = cpu_cache; 3814 /* 3815 * Without a previous cpu_cache there's no need to synchronize remote 3816 * cpus, so skip the IPIs. 
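	 *
	 * (Editorial note: when there is a previous array, the
	 * kick_all_cpus_sync() below acts as a barrier.  The per-cpu fast
	 * paths run with interrupts disabled, so once every CPU has taken
	 * the IPI none of them can still be using the old arrays, which are
	 * then drained with free_block() and freed.)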
3817 */ 3818 if (prev) 3819 kick_all_cpus_sync(); 3820 3821 check_irq_on(); 3822 cachep->batchcount = batchcount; 3823 cachep->limit = limit; 3824 cachep->shared = shared; 3825 3826 if (!prev) 3827 goto setup_node; 3828 3829 for_each_online_cpu(cpu) { 3830 LIST_HEAD(list); 3831 int node; 3832 struct kmem_cache_node *n; 3833 struct array_cache *ac = per_cpu_ptr(prev, cpu); 3834 3835 node = cpu_to_mem(cpu); 3836 n = get_node(cachep, node); 3837 spin_lock_irq(&n->list_lock); 3838 free_block(cachep, ac->entry, ac->avail, node, &list); 3839 spin_unlock_irq(&n->list_lock); 3840 slabs_destroy(cachep, &list); 3841 } 3842 free_percpu(prev); 3843 3844 setup_node: 3845 return setup_kmem_cache_nodes(cachep, gfp); 3846 } 3847 3848 /* Called with slab_mutex held always */ 3849 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3850 { 3851 int err; 3852 int limit = 0; 3853 int shared = 0; 3854 int batchcount = 0; 3855 3856 err = cache_random_seq_create(cachep, cachep->num, gfp); 3857 if (err) 3858 goto end; 3859 3860 if (limit && shared && batchcount) 3861 goto skip_setup; 3862 /* 3863 * The head array serves three purposes: 3864 * - create a LIFO ordering, i.e. return objects that are cache-warm 3865 * - reduce the number of spinlock operations. 3866 * - reduce the number of linked list operations on the slab and 3867 * bufctl chains: array operations are cheaper. 3868 * The numbers are guessed, we should auto-tune as described by 3869 * Bonwick. 3870 */ 3871 if (cachep->size > 131072) 3872 limit = 1; 3873 else if (cachep->size > PAGE_SIZE) 3874 limit = 8; 3875 else if (cachep->size > 1024) 3876 limit = 24; 3877 else if (cachep->size > 256) 3878 limit = 54; 3879 else 3880 limit = 120; 3881 3882 /* 3883 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3884 * allocation behaviour: Most allocs on one cpu, most free operations 3885 * on another cpu. For these cases, an efficient object passing between 3886 * cpus is necessary. This is provided by a shared array. The array 3887 * replaces Bonwick's magazine layer. 3888 * On uniprocessor, it's functionally equivalent (but less efficient) 3889 * to a larger limit. Thus disabled by default. 3890 */ 3891 shared = 0; 3892 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) 3893 shared = 8; 3894 3895 #if DEBUG 3896 /* 3897 * With debugging enabled, large batchcount lead to excessively long 3898 * periods with disabled local interrupts. Limit the batchcount 3899 */ 3900 if (limit > 32) 3901 limit = 32; 3902 #endif 3903 batchcount = (limit + 1) / 2; 3904 skip_setup: 3905 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3906 end: 3907 if (err) 3908 pr_err("enable_cpucache failed for %s, error %d\n", 3909 cachep->name, -err); 3910 return err; 3911 } 3912 3913 /* 3914 * Drain an array if it contains any elements taking the node lock only if 3915 * necessary. Note that the node listlock also protects the array_cache 3916 * if drain_array() is used on the shared array. 3917 */ 3918 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 3919 struct array_cache *ac, int node) 3920 { 3921 LIST_HEAD(list); 3922 3923 /* ac from n->shared can be freed if we don't hold the slab_mutex. 
*/ 3924 check_mutex_acquired(); 3925 3926 if (!ac || !ac->avail) 3927 return; 3928 3929 if (ac->touched) { 3930 ac->touched = 0; 3931 return; 3932 } 3933 3934 spin_lock_irq(&n->list_lock); 3935 drain_array_locked(cachep, ac, node, false, &list); 3936 spin_unlock_irq(&n->list_lock); 3937 3938 slabs_destroy(cachep, &list); 3939 } 3940 3941 /** 3942 * cache_reap - Reclaim memory from caches. 3943 * @w: work descriptor 3944 * 3945 * Called from workqueue/eventd every few seconds. 3946 * Purpose: 3947 * - clear the per-cpu caches for this CPU. 3948 * - return freeable pages to the main free memory pool. 3949 * 3950 * If we cannot acquire the cache chain mutex then just give up - we'll try 3951 * again on the next iteration. 3952 */ 3953 static void cache_reap(struct work_struct *w) 3954 { 3955 struct kmem_cache *searchp; 3956 struct kmem_cache_node *n; 3957 int node = numa_mem_id(); 3958 struct delayed_work *work = to_delayed_work(w); 3959 3960 if (!mutex_trylock(&slab_mutex)) 3961 /* Give up. Setup the next iteration. */ 3962 goto out; 3963 3964 list_for_each_entry(searchp, &slab_caches, list) { 3965 check_irq_on(); 3966 3967 /* 3968 * We only take the node lock if absolutely necessary and we 3969 * have established with reasonable certainty that 3970 * we can do some work if the lock was obtained. 3971 */ 3972 n = get_node(searchp, node); 3973 3974 reap_alien(searchp, n); 3975 3976 drain_array(searchp, n, cpu_cache_get(searchp), node); 3977 3978 /* 3979 * These are racy checks but it does not matter 3980 * if we skip one check or scan twice. 3981 */ 3982 if (time_after(n->next_reap, jiffies)) 3983 goto next; 3984 3985 n->next_reap = jiffies + REAPTIMEOUT_NODE; 3986 3987 drain_array(searchp, n, n->shared, node); 3988 3989 if (n->free_touched) 3990 n->free_touched = 0; 3991 else { 3992 int freed; 3993 3994 freed = drain_freelist(searchp, n, (n->free_limit + 3995 5 * searchp->num - 1) / (5 * searchp->num)); 3996 STATS_ADD_REAPED(searchp, freed); 3997 } 3998 next: 3999 cond_resched(); 4000 } 4001 check_irq_on(); 4002 mutex_unlock(&slab_mutex); 4003 next_reap_node(); 4004 out: 4005 /* Set up the next iteration */ 4006 schedule_delayed_work_on(smp_processor_id(), work, 4007 round_jiffies_relative(REAPTIMEOUT_AC)); 4008 } 4009 4010 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) 4011 { 4012 unsigned long active_objs, num_objs, active_slabs; 4013 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0; 4014 unsigned long free_slabs = 0; 4015 int node; 4016 struct kmem_cache_node *n; 4017 4018 for_each_kmem_cache_node(cachep, node, n) { 4019 check_irq_on(); 4020 spin_lock_irq(&n->list_lock); 4021 4022 total_slabs += n->total_slabs; 4023 free_slabs += n->free_slabs; 4024 free_objs += n->free_objects; 4025 4026 if (n->shared) 4027 shared_avail += n->shared->avail; 4028 4029 spin_unlock_irq(&n->list_lock); 4030 } 4031 num_objs = total_slabs * cachep->num; 4032 active_slabs = total_slabs - free_slabs; 4033 active_objs = num_objs - free_objs; 4034 4035 sinfo->active_objs = active_objs; 4036 sinfo->num_objs = num_objs; 4037 sinfo->active_slabs = active_slabs; 4038 sinfo->num_slabs = total_slabs; 4039 sinfo->shared_avail = shared_avail; 4040 sinfo->limit = cachep->limit; 4041 sinfo->batchcount = cachep->batchcount; 4042 sinfo->shared = cachep->shared; 4043 sinfo->objects_per_slab = cachep->num; 4044 sinfo->cache_order = cachep->gfporder; 4045 } 4046 4047 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) 4048 { 4049 #if STATS 4050 { /* node stats */ 4051 
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: @count on success, negative error code otherwise.
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
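/*
 * Usage sketch for the interface above (the cache name is illustrative):
 * the buffer is parsed as "<cache-name> <limit> <batchcount> <shared>",
 * so from a root shell something like
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * retunes that cache to limit=120, batchcount=60, shared=8 via
 * do_tune_cpucache(). Values that fail the sanity checks above (limit < 1,
 * batchcount < 1, batchcount > limit or shared < 0) are silently ignored
 * and the write still returns @count.
 */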
#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Either returns (the copy is allowed, or only a whitelist warning was
 * emitted) or calls usercopy_abort() to reject the copy and report the
 * offending cache by name.
 */
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			 bool to_user)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	ptr = kasan_reset_tag(ptr);

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within usercopy region. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}
#endif /* CONFIG_HARDENED_USERCOPY */

/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t __ksize(const void *objp)
{
	struct kmem_cache *c;
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	c = virt_to_cache(objp);
	size = c ? c->object_size : 0;

	return size;
}
EXPORT_SYMBOL(__ksize);
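/*
 * Usage sketch (the size class is illustrative and assumes a standard
 * power-of-two kmalloc-128 cache): __ksize()/ksize() report the backing
 * cache's object_size, not the originally requested size, so
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	size_t sz = ksize(p);		typically 128 here
 *
 * may report more than was asked for, and callers are allowed to use the
 * full reported size. If the pointer cannot be resolved to a cache,
 * virt_to_cache() returns NULL and this implementation reports 0.
 */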