/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */
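/*
 * Illustrative sketch of typical client usage of the interface described
 * above (struct foo, foo_cache and foo_ctor are hypothetical names, not
 * part of this file):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);	(object back in its constructed state)
 *	kmem_cache_destroy(foo_cache);	(caller must exclude concurrent allocs)
 */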
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
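/*
 * Illustrative sketch of the pointer tagging done by the helpers above:
 * the lowest bit is free for use because slab objects are at least word
 * aligned. With a hypothetical object address:
 *
 *	void *obj = (void *)0xffff880012345678;
 *
 *	set_obj_pfmemalloc(&obj);	obj == 0xffff880012345679
 *	is_obj_pfmemalloc(obj);		returns true
 *	clear_obj_pfmemalloc(&obj);	obj == 0xffff880012345678 again
 */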
198 * 199 * Entries should not be directly dereferenced as 200 * entries belonging to slabs marked pfmemalloc will 201 * have the lower bits set SLAB_OBJ_PFMEMALLOC 202 */ 203 }; 204 205 struct alien_cache { 206 spinlock_t lock; 207 struct array_cache ac; 208 }; 209 210 #define SLAB_OBJ_PFMEMALLOC 1 211 static inline bool is_obj_pfmemalloc(void *objp) 212 { 213 return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC; 214 } 215 216 static inline void set_obj_pfmemalloc(void **objp) 217 { 218 *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC); 219 return; 220 } 221 222 static inline void clear_obj_pfmemalloc(void **objp) 223 { 224 *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC); 225 } 226 227 /* 228 * bootstrap: The caches do not work without cpuarrays anymore, but the 229 * cpuarrays are allocated from the generic caches... 230 */ 231 #define BOOT_CPUCACHE_ENTRIES 1 232 struct arraycache_init { 233 struct array_cache cache; 234 void *entries[BOOT_CPUCACHE_ENTRIES]; 235 }; 236 237 /* 238 * Need this for bootstrapping a per node allocator. 239 */ 240 #define NUM_INIT_LISTS (2 * MAX_NUMNODES) 241 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; 242 #define CACHE_CACHE 0 243 #define SIZE_NODE (MAX_NUMNODES) 244 245 static int drain_freelist(struct kmem_cache *cache, 246 struct kmem_cache_node *n, int tofree); 247 static void free_block(struct kmem_cache *cachep, void **objpp, int len, 248 int node, struct list_head *list); 249 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list); 250 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); 251 static void cache_reap(struct work_struct *unused); 252 253 static int slab_early_init = 1; 254 255 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) 256 257 static void kmem_cache_node_init(struct kmem_cache_node *parent) 258 { 259 INIT_LIST_HEAD(&parent->slabs_full); 260 INIT_LIST_HEAD(&parent->slabs_partial); 261 INIT_LIST_HEAD(&parent->slabs_free); 262 parent->shared = NULL; 263 parent->alien = NULL; 264 parent->colour_next = 0; 265 spin_lock_init(&parent->list_lock); 266 parent->free_objects = 0; 267 parent->free_touched = 0; 268 } 269 270 #define MAKE_LIST(cachep, listp, slab, nodeid) \ 271 do { \ 272 INIT_LIST_HEAD(listp); \ 273 list_splice(&get_node(cachep, nodeid)->slab, listp); \ 274 } while (0) 275 276 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 277 do { \ 278 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 279 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 280 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 281 } while (0) 282 283 #define CFLGS_OFF_SLAB (0x80000000UL) 284 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 285 286 #define BATCHREFILL_LIMIT 16 287 /* 288 * Optimization question: fewer reaps means less probability for unnessary 289 * cpucache drain/refill cycles. 290 * 291 * OTOH the cpuarrays can contain lots of objects, 292 * which could lock up otherwise freeable slabs. 
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1 * BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
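/*
 * Worked example of the layout above (illustrative; assumes a 64-bit
 * machine, SLAB_RED_ZONE and SLAB_STORE_USER both set, a 16-byte object
 * and no extra alignment padding):
 *
 *	obj_offset  == 8	redzone 1 at bytes  0..7
 *	object      at bytes  8..23
 *	redzone 2   at bytes 24..31	(size - 8 - REDZONE_ALIGN)
 *	last caller at bytes 32..39	(size - BYTES_PER_WORD)
 *	cachep->size == 40
 */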
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *) (objp + obj_offset(cachep) -
				       sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif
/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
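/*
 * Illustrative check of the reciprocal trick above: for a cache with
 * size == 256 and an object starting 1024 bytes past page->s_mem,
 *
 *	obj_to_index() == reciprocal_divide(1024, reciprocal_value(256))
 *		       == 1024 / 256 == 4
 *
 * i.e. the multiply-and-shift yields exactly the quotient a divide
 * instruction would, just cheaper on most CPUs.
 */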
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t remained_size;
	size_t freelist_size;
	int extra_space = 0;

	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
		nr_objs--;

	return nr_objs;
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One freelist_idx_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = calculate_freelist_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
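/*
 * Illustrative sketch: both knobs above are kernel boot parameters, so a
 * command line of, e.g.,
 *
 *	slab_max_order=2 noaliencache
 *
 * caps slab pages at order 2 (the setup handler clamps the value to
 * [0, MAX_ORDER - 1]) and disables the NUMA alien caches entirely.
 */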
#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}
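/*
 * Illustrative sketch of the allocation above (the entry count is
 * hypothetical): the void *entry[] flexible array trails the header, so
 * for entries == 120 on a 64-bit machine alloc_arraycache() requests
 *
 *	sizeof(struct array_cache) + 120 * sizeof(void *)
 *
 * bytes from kmalloc_node(), and the object pointers are stacked
 * directly behind the avail/limit/batchcount/touched bookkeeping.
 */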
static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = get_node(cachep, numa_mem_id());
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static noinline void *__ac_put_obj(struct kmem_cache *cachep,
			struct array_cache *ac, void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
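/*
 * Illustrative sketch of the per-cpu array semantics above, assuming a
 * hypothetical state with ac->avail == 3: ac_get_obj() pops entry[2] -
 * the most recently freed, and therefore cache-warmest, object - and
 * leaves avail == 2, while ac_put_obj() pushes at entry[avail] and
 * increments it, so the array behaves as a LIFO stack.
 * transfer_objects() peels the top nr entries off the source stack and
 * appends them to the top of the destination stack.
 */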
#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	/* Do not touch the embedded array_cache if the allocation failed. */
	if (alc) {
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}
/*
 * Construct gfp mask to allocate from a specific node but do not invoke reclaim
 * or warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
}
#endif

/*
 * Allocates and initializes a kmem_cache_node for a node on each slab cache,
 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 * kmem_cache_node will be allocated off-node since memory is not yet online
 * for the new node.  When hotplugging memory or a cpu, existing nodes are
 * not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const size_t memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		n = get_node(cachep, node);
		if (!n) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&n->list_lock);
		n->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&n->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}
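/*
 * Illustrative arithmetic for slabs_tofree() above: it is a ceiling
 * division, so with, e.g., cachep->num == 4 objects per slab and
 * n->free_objects == 5,
 *
 *	(5 + 4 - 1) / 4 == 2
 *
 * i.e. draining two slabs is enough to cover every free object.
 */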
static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *shared = NULL;
		struct alien_cache **alien = NULL;

		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared)
				goto bad;
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				goto bad;
			}
		}
		n = get_node(cachep, node);
		BUG_ON(!n);

		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			n->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!n->alien) {
			n->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&n->list_lock);
		kfree(shared);
		free_alien_cache(alien);
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&slab_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&slab_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the slab_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_cache_node of any cache. This is to avoid a race
		 * between cpu_down, and a kmalloc allocation from another cpu
		 * for memory from the node of the cpu going down. The node
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&slab_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&slab_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = get_node(cachep, node);
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for a cache whose buffer_size is
 * the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
			REAPTIMEOUT_NODE +
			((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	struct page *page;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->slabs_full, lru) {
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru)
			num_slabs++;

		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
		return NULL;

	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		memcg_uncharge_slab(cachep, cachep->gfporder);
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (page_is_pfmemalloc(page))
		pfmemalloc_active = true;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	const unsigned long nr_freed = (1 << cachep->gfporder);

	kmemcheck_free_shadow(page, cachep->gfporder);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	__free_pages(page, cachep->gfporder);
	memcg_uncharge_slab(cachep, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];
	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}
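/*
 * Illustrative poison layout scanned by check_poison_obj() below, using
 * the values from include/linux/poison.h: a free 8-byte object is
 * expected to read
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE (0x6b) in every byte except the last, which holds
 * POISON_END (0xa5). Any other pattern is reported as corruption.
 */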
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
		 */
		head = (void *)&page->rcu_head;
		call_rcu(head, kmem_rcu_free);

	} else {
		kmem_freepages(cachep, page);
	}

	/*
	 * From now on, we don't use freelist
	 * although the actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			size_t freelist_size_per_obj = sizeof(freelist_idx_t);
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
				freelist_size_per_obj += sizeof(char);
			offslab_limit = size;
			offslab_limit /= freelist_size_per_obj;

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}
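/*
 * Illustrative run of the fragmentation cut-off above: with 4 KiB pages
 * and gfporder == 0, the order search accepts the current order once
 *
 *	left_over * 8 <= 4096,	i.e. left_over <= 512 bytes,
 *
 * so at most 1/8 of the slab may be wasted before the next higher order
 * is considered.
 */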
1995 */
1996 if (left_over * 8 <= (PAGE_SIZE << gfporder))
1997 break;
1998 }
1999 return left_over;
2000 }
2001
2002 static struct array_cache __percpu *alloc_kmem_cache_cpus(
2003 struct kmem_cache *cachep, int entries, int batchcount)
2004 {
2005 int cpu;
2006 size_t size;
2007 struct array_cache __percpu *cpu_cache;
2008
2009 size = sizeof(void *) * entries + sizeof(struct array_cache);
2010 cpu_cache = __alloc_percpu(size, sizeof(void *));
2011
2012 if (!cpu_cache)
2013 return NULL;
2014
2015 for_each_possible_cpu(cpu) {
2016 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
2017 entries, batchcount);
2018 }
2019
2020 return cpu_cache;
2021 }
2022
2023 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2024 {
2025 if (slab_state >= FULL)
2026 return enable_cpucache(cachep, gfp);
2027
2028 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
2029 if (!cachep->cpu_cache)
2030 return 1;
2031
2032 if (slab_state == DOWN) {
2033 /* Creation of first cache (kmem_cache). */
2034 set_up_node(kmem_cache, CACHE_CACHE);
2035 } else if (slab_state == PARTIAL) {
2036 /* For kmem_cache_node */
2037 set_up_node(cachep, SIZE_NODE);
2038 } else {
2039 int node;
2040
2041 for_each_online_node(node) {
2042 cachep->node[node] = kmalloc_node(
2043 sizeof(struct kmem_cache_node), gfp, node);
2044 BUG_ON(!cachep->node[node]);
2045 kmem_cache_node_init(cachep->node[node]);
2046 }
2047 }
2048
2049 cachep->node[numa_mem_id()]->next_reap =
2050 jiffies + REAPTIMEOUT_NODE +
2051 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
2052
2053 cpu_cache_get(cachep)->avail = 0;
2054 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2055 cpu_cache_get(cachep)->batchcount = 1;
2056 cpu_cache_get(cachep)->touched = 0;
2057 cachep->batchcount = 1;
2058 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2059 return 0;
2060 }
2061
2062 unsigned long kmem_cache_flags(unsigned long object_size,
2063 unsigned long flags, const char *name,
2064 void (*ctor)(void *))
2065 {
2066 return flags;
2067 }
2068
2069 struct kmem_cache *
2070 __kmem_cache_alias(const char *name, size_t size, size_t align,
2071 unsigned long flags, void (*ctor)(void *))
2072 {
2073 struct kmem_cache *cachep;
2074
2075 cachep = find_mergeable(size, align, flags, name, ctor);
2076 if (cachep) {
2077 cachep->refcount++;
2078
2079 /*
2080 * Adjust the object sizes so that we clear
2081 * the complete object on kzalloc.
2082 */
2083 cachep->object_size = max_t(int, cachep->object_size, size);
2084 }
2085 return cachep;
2086 }
2087
2088 /**
2089 * __kmem_cache_create - Create a cache.
2090 * @cachep: cache management descriptor
2091 * @flags: SLAB flags
2092 *
2093 * Returns 0 on success, nonzero on failure.
2094 * Cannot be called within an interrupt, but can be interrupted.
2095 * The @ctor is run when new pages are allocated by the cache.
2096 *
2097 * The flags are
2098 *
2099 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2100 * to catch references to uninitialised memory.
2101 *
2102 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2103 * for buffer overruns.
2104 *
2105 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2106 * cacheline. This can be beneficial if you're counting cycles as closely
2107 * as davem.
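*
* A minimal usage sketch from a client's point of view (illustrative
* only; the "foo" names are hypothetical, and caches are normally set up
* through the public kmem_cache_create() wrapper rather than this
* function directly):
*
*	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
*				      SLAB_HWCACHE_ALIGN, foo_ctor);
*	if (!foo_cache)
*		return -ENOMEM;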
2108 */
2109 int
2110 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2111 {
2112 size_t left_over, freelist_size;
2113 size_t ralign = BYTES_PER_WORD;
2114 gfp_t gfp;
2115 int err;
2116 size_t size = cachep->size;
2117
2118 #if DEBUG
2119 #if FORCED_DEBUG
2120 /*
2121 * Enable redzoning and last user accounting, except for caches with
2122 * large objects, if the increased size would increase the object size
2123 * above the next power of two: caches with object sizes just above a
2124 * power of two have a significant amount of internal fragmentation.
2125 */
2126 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2127 2 * sizeof(unsigned long long)))
2128 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2129 if (!(flags & SLAB_DESTROY_BY_RCU))
2130 flags |= SLAB_POISON;
2131 #endif
2132 if (flags & SLAB_DESTROY_BY_RCU)
2133 BUG_ON(flags & SLAB_POISON);
2134 #endif
2135
2136 /*
2137 * Check that size is in terms of words. This is needed to avoid
2138 * unaligned accesses for some archs when redzoning is used, and makes
2139 * sure any on-slab bufctl's are also correctly aligned.
2140 */
2141 if (size & (BYTES_PER_WORD - 1)) {
2142 size += (BYTES_PER_WORD - 1);
2143 size &= ~(BYTES_PER_WORD - 1);
2144 }
2145
2146 if (flags & SLAB_RED_ZONE) {
2147 ralign = REDZONE_ALIGN;
2148 /* If redzoning, ensure that the second redzone is suitably
2149 * aligned, by adjusting the object size accordingly. */
2150 size += REDZONE_ALIGN - 1;
2151 size &= ~(REDZONE_ALIGN - 1);
2152 }
2153
2154 /* 3) caller mandated alignment */
2155 if (ralign < cachep->align) {
2156 ralign = cachep->align;
2157 }
2158 /* disable debug if necessary */
2159 if (ralign > __alignof__(unsigned long long))
2160 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2161 /*
2162 * 4) Store it.
2163 */
2164 cachep->align = ralign;
2165
2166 if (slab_is_available())
2167 gfp = GFP_KERNEL;
2168 else
2169 gfp = GFP_NOWAIT;
2170
2171 #if DEBUG
2172
2173 /*
2174 * Both debugging options require word-alignment which is calculated
2175 * into align above.
2176 */
2177 if (flags & SLAB_RED_ZONE) {
2178 /* add space for red zone words */
2179 cachep->obj_offset += sizeof(unsigned long long);
2180 size += 2 * sizeof(unsigned long long);
2181 }
2182 if (flags & SLAB_STORE_USER) {
2183 /* user store requires one word storage behind the end of
2184 * the real object. But if the second red zone needs to be
2185 * aligned to 64 bits, we must allow that much space.
2186 */
2187 if (flags & SLAB_RED_ZONE)
2188 size += REDZONE_ALIGN;
2189 else
2190 size += BYTES_PER_WORD;
2191 }
2192 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2193 /*
2194 * Off-slab management is a necessary requirement for activating debug
2195 * pagealloc. In the early phase of initialization the small sized slab
2196 * caches are not yet set up, so it would not be possible then. Hence
2197 * the size >= 256 check: it guarantees that all the necessary small
2198 * sized slabs are initialized in the current slab initialization sequence.
2199 */
2200 if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
2201 size >= 256 && cachep->object_size > cache_line_size() &&
2202 ALIGN(size, cachep->align) < PAGE_SIZE) {
2203 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2204 size = PAGE_SIZE;
2205 }
2206 #endif
2207 #endif
2208
2209 /*
2210 * Determine if the slab management is 'on' or 'off' slab.
2211 * (bootstrapping cannot cope with offslab caches so don't do
2212 * it too early on.
Always use on-slab management when
2213 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak)
2214 */
2215 if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2216 !(flags & SLAB_NOLEAKTRACE))
2217 /*
2218 * Size is large, assume best to place the slab management obj
2219 * off-slab (should allow better packing of objs).
2220 */
2221 flags |= CFLGS_OFF_SLAB;
2222
2223 size = ALIGN(size, cachep->align);
2224 /*
2225 * We should restrict the number of objects in a slab to implement
2226 * byte sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2227 */
2228 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2229 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2230
2231 left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2232
2233 if (!cachep->num)
2234 return -E2BIG;
2235
2236 freelist_size = calculate_freelist_size(cachep->num, cachep->align);
2237
2238 /*
2239 * If the slab has been placed off-slab, and we have enough space then
2240 * move it on-slab. This is at the expense of any extra colouring.
2241 */
2242 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2243 flags &= ~CFLGS_OFF_SLAB;
2244 left_over -= freelist_size;
2245 }
2246
2247 if (flags & CFLGS_OFF_SLAB) {
2248 /* Really off-slab. No need for manual alignment. */
2249 freelist_size = calculate_freelist_size(cachep->num, 0);
2250
2251 #ifdef CONFIG_PAGE_POISONING
2252 /* If we're going to use the generic kernel_map_pages()
2253 * poisoning, then it's going to smash the contents of
2254 * the redzone and userword anyhow, so switch them off.
2255 */
2256 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2257 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2258 #endif
2259 }
2260
2261 cachep->colour_off = cache_line_size();
2262 /* Offset must be a multiple of the alignment. */
2263 if (cachep->colour_off < cachep->align)
2264 cachep->colour_off = cachep->align;
2265 cachep->colour = left_over / cachep->colour_off;
2266 cachep->freelist_size = freelist_size;
2267 cachep->flags = flags;
2268 cachep->allocflags = __GFP_COMP;
2269 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2270 cachep->allocflags |= GFP_DMA;
2271 cachep->size = size;
2272 cachep->reciprocal_buffer_size = reciprocal_value(size);
2273
2274 if (flags & CFLGS_OFF_SLAB) {
2275 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2276 /*
2277 * This is a possibility for one of the kmalloc_{dma,}_caches.
2278 * But since we go off slab only for object size greater than
2279 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2280 * in ascending order, this should not happen at all.
2281 * But leave a BUG_ON for some lucky dude.
2282 */ 2283 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); 2284 } 2285 2286 err = setup_cpu_cache(cachep, gfp); 2287 if (err) { 2288 __kmem_cache_shutdown(cachep); 2289 return err; 2290 } 2291 2292 return 0; 2293 } 2294 2295 #if DEBUG 2296 static void check_irq_off(void) 2297 { 2298 BUG_ON(!irqs_disabled()); 2299 } 2300 2301 static void check_irq_on(void) 2302 { 2303 BUG_ON(irqs_disabled()); 2304 } 2305 2306 static void check_spinlock_acquired(struct kmem_cache *cachep) 2307 { 2308 #ifdef CONFIG_SMP 2309 check_irq_off(); 2310 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); 2311 #endif 2312 } 2313 2314 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2315 { 2316 #ifdef CONFIG_SMP 2317 check_irq_off(); 2318 assert_spin_locked(&get_node(cachep, node)->list_lock); 2319 #endif 2320 } 2321 2322 #else 2323 #define check_irq_off() do { } while(0) 2324 #define check_irq_on() do { } while(0) 2325 #define check_spinlock_acquired(x) do { } while(0) 2326 #define check_spinlock_acquired_node(x, y) do { } while(0) 2327 #endif 2328 2329 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 2330 struct array_cache *ac, 2331 int force, int node); 2332 2333 static void do_drain(void *arg) 2334 { 2335 struct kmem_cache *cachep = arg; 2336 struct array_cache *ac; 2337 int node = numa_mem_id(); 2338 struct kmem_cache_node *n; 2339 LIST_HEAD(list); 2340 2341 check_irq_off(); 2342 ac = cpu_cache_get(cachep); 2343 n = get_node(cachep, node); 2344 spin_lock(&n->list_lock); 2345 free_block(cachep, ac->entry, ac->avail, node, &list); 2346 spin_unlock(&n->list_lock); 2347 slabs_destroy(cachep, &list); 2348 ac->avail = 0; 2349 } 2350 2351 static void drain_cpu_caches(struct kmem_cache *cachep) 2352 { 2353 struct kmem_cache_node *n; 2354 int node; 2355 2356 on_each_cpu(do_drain, cachep, 1); 2357 check_irq_on(); 2358 for_each_kmem_cache_node(cachep, node, n) 2359 if (n->alien) 2360 drain_alien_cache(cachep, n->alien); 2361 2362 for_each_kmem_cache_node(cachep, node, n) 2363 drain_array(cachep, n, n->shared, 1, node); 2364 } 2365 2366 /* 2367 * Remove slabs from the list of free slabs. 2368 * Specify the number of slabs to drain in tofree. 2369 * 2370 * Returns the actual number of slabs released. 2371 */ 2372 static int drain_freelist(struct kmem_cache *cache, 2373 struct kmem_cache_node *n, int tofree) 2374 { 2375 struct list_head *p; 2376 int nr_freed; 2377 struct page *page; 2378 2379 nr_freed = 0; 2380 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2381 2382 spin_lock_irq(&n->list_lock); 2383 p = n->slabs_free.prev; 2384 if (p == &n->slabs_free) { 2385 spin_unlock_irq(&n->list_lock); 2386 goto out; 2387 } 2388 2389 page = list_entry(p, struct page, lru); 2390 #if DEBUG 2391 BUG_ON(page->active); 2392 #endif 2393 list_del(&page->lru); 2394 /* 2395 * Safe to drop the lock. The slab is no longer linked 2396 * to the cache. 2397 */ 2398 n->free_objects -= cache->num; 2399 spin_unlock_irq(&n->list_lock); 2400 slab_destroy(cache, page); 2401 nr_freed++; 2402 } 2403 out: 2404 return nr_freed; 2405 } 2406 2407 int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) 2408 { 2409 int ret = 0; 2410 int node; 2411 struct kmem_cache_node *n; 2412 2413 drain_cpu_caches(cachep); 2414 2415 check_irq_on(); 2416 for_each_kmem_cache_node(cachep, node, n) { 2417 drain_freelist(cachep, n, slabs_tofree(cachep, n)); 2418 2419 ret += !list_empty(&n->slabs_full) || 2420 !list_empty(&n->slabs_partial); 2421 } 2422 return (ret ? 
1 : 0);
2423 }
2424
2425 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2426 {
2427 int i;
2428 struct kmem_cache_node *n;
2429 int rc = __kmem_cache_shrink(cachep, false);
2430
2431 if (rc)
2432 return rc;
2433
2434 free_percpu(cachep->cpu_cache);
2435
2436 /* NUMA: free the node structures */
2437 for_each_kmem_cache_node(cachep, i, n) {
2438 kfree(n->shared);
2439 free_alien_cache(n->alien);
2440 kfree(n);
2441 cachep->node[i] = NULL;
2442 }
2443 return 0;
2444 }
2445
2446 /*
2447 * Get the memory for a slab management obj.
2448 *
2449 * For a slab cache whose slab descriptor is off-slab, the
2450 * slab descriptor can't come from the same cache that is being created:
2451 * if it could, that would mean we had deferred the creation of
2452 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
2453 * and we would eventually call down to __kmem_cache_create(), which
2454 * in turn looks up the desired-size one in the kmalloc_{dma,}_caches.
2455 * This is a "chicken-and-egg" problem.
2456 *
2457 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2458 * which are all initialized during kmem_cache_init().
2459 */
2460 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2461 struct page *page, int colour_off,
2462 gfp_t local_flags, int nodeid)
2463 {
2464 void *freelist;
2465 void *addr = page_address(page);
2466
2467 if (OFF_SLAB(cachep)) {
2468 /* Slab management obj is off-slab. */
2469 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2470 local_flags, nodeid);
2471 if (!freelist)
2472 return NULL;
2473 } else {
2474 freelist = addr + colour_off;
2475 colour_off += cachep->freelist_size;
2476 }
2477 page->active = 0;
2478 page->s_mem = addr + colour_off;
2479 return freelist;
2480 }
2481
2482 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2483 {
2484 return ((freelist_idx_t *)page->freelist)[idx];
2485 }
2486
2487 static inline void set_free_obj(struct page *page,
2488 unsigned int idx, freelist_idx_t val)
2489 {
2490 ((freelist_idx_t *)(page->freelist))[idx] = val;
2491 }
2492
2493 static void cache_init_objs(struct kmem_cache *cachep,
2494 struct page *page)
2495 {
2496 int i;
2497
2498 for (i = 0; i < cachep->num; i++) {
2499 void *objp = index_to_obj(cachep, page, i);
2500 #if DEBUG
2501 /* need to poison the objs? */
2502 if (cachep->flags & SLAB_POISON)
2503 poison_obj(cachep, objp, POISON_FREE);
2504 if (cachep->flags & SLAB_STORE_USER)
2505 *dbg_userword(cachep, objp) = NULL;
2506
2507 if (cachep->flags & SLAB_RED_ZONE) {
2508 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2509 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2510 }
2511 /*
2512 * Constructors are not allowed to allocate memory from the same
2513 * cache which they are a constructor for. Otherwise, deadlock.
2514 * They must also be threaded.
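* (For example, a constructor registered for a hypothetical foo_cache
* must not call kmem_cache_alloc(foo_cache, ...) itself, directly or
* indirectly.)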
2515 */
2516 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2517 cachep->ctor(objp + obj_offset(cachep));
2518
2519 if (cachep->flags & SLAB_RED_ZONE) {
2520 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2521 slab_error(cachep, "constructor overwrote the"
2522 " end of an object");
2523 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2524 slab_error(cachep, "constructor overwrote the"
2525 " start of an object");
2526 }
2527 if ((cachep->size % PAGE_SIZE) == 0 &&
2528 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2529 kernel_map_pages(virt_to_page(objp),
2530 cachep->size / PAGE_SIZE, 0);
2531 #else
2532 if (cachep->ctor)
2533 cachep->ctor(objp);
2534 #endif
2535 set_obj_status(page, i, OBJECT_FREE);
2536 set_free_obj(page, i, i);
2537 }
2538 }
2539
2540 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2541 {
2542 if (CONFIG_ZONE_DMA_FLAG) {
2543 if (flags & GFP_DMA)
2544 BUG_ON(!(cachep->allocflags & GFP_DMA));
2545 else
2546 BUG_ON(cachep->allocflags & GFP_DMA);
2547 }
2548 }
2549
2550 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2551 int nodeid)
2552 {
2553 void *objp;
2554
2555 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2556 page->active++;
2557 #if DEBUG
2558 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2559 #endif
2560
2561 return objp;
2562 }
2563
2564 static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2565 void *objp, int nodeid)
2566 {
2567 unsigned int objnr = obj_to_index(cachep, page, objp);
2568 #if DEBUG
2569 unsigned int i;
2570
2571 /* Verify that the slab belongs to the intended node */
2572 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2573
2574 /* Check for a double free */
2575 for (i = page->active; i < cachep->num; i++) {
2576 if (get_free_obj(page, i) == objnr) {
2577 printk(KERN_ERR "slab: double free detected in cache "
2578 "'%s', objp %p\n", cachep->name, objp);
2579 BUG();
2580 }
2581 }
2582 #endif
2583 page->active--;
2584 set_free_obj(page, page->active, objnr);
2585 }
2586
2587 /*
2588 * Map pages beginning at addr to the given cache and slab. This is required
2589 * for the slab allocator to be able to look up the cache and slab of a
2590 * virtual address for kfree, ksize, and slab debugging.
2591 */
2592 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2593 void *freelist)
2594 {
2595 page->slab_cache = cache;
2596 page->freelist = freelist;
2597 }
2598
2599 /*
2600 * Grow (by 1) the number of slabs within a cache. This is called by
2601 * kmem_cache_alloc() when there are no active objs left in a cache.
2602 */
2603 static int cache_grow(struct kmem_cache *cachep,
2604 gfp_t flags, int nodeid, struct page *page)
2605 {
2606 void *freelist;
2607 size_t offset;
2608 gfp_t local_flags;
2609 struct kmem_cache_node *n;
2610
2611 /*
2612 * Be lazy and only check for valid flags here, keeping it out of the
2613 * critical path in kmem_cache_alloc().
2614 */
2615 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2616 pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
2617 BUG();
2618 }
2619 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2620
2621 /* Take the node list lock to change the colour_next on this node */
2622 check_irq_off();
2623 n = get_node(cachep, nodeid);
2624 spin_lock(&n->list_lock);
2625
2626 /* Get colour for the slab, and calculate the next value.
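* (For instance, with colour_off == 64 and cachep->colour == 4,
* successive slabs on this node start their objects at offsets
* 0, 64, 128 and 192, then wrap back to 0.)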
*/ 2627 offset = n->colour_next; 2628 n->colour_next++; 2629 if (n->colour_next >= cachep->colour) 2630 n->colour_next = 0; 2631 spin_unlock(&n->list_lock); 2632 2633 offset *= cachep->colour_off; 2634 2635 if (local_flags & __GFP_WAIT) 2636 local_irq_enable(); 2637 2638 /* 2639 * The test for missing atomic flag is performed here, rather than 2640 * the more obvious place, simply to reduce the critical path length 2641 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2642 * will eventually be caught here (where it matters). 2643 */ 2644 kmem_flagcheck(cachep, flags); 2645 2646 /* 2647 * Get mem for the objs. Attempt to allocate a physical page from 2648 * 'nodeid'. 2649 */ 2650 if (!page) 2651 page = kmem_getpages(cachep, local_flags, nodeid); 2652 if (!page) 2653 goto failed; 2654 2655 /* Get slab management. */ 2656 freelist = alloc_slabmgmt(cachep, page, offset, 2657 local_flags & ~GFP_CONSTRAINT_MASK, nodeid); 2658 if (!freelist) 2659 goto opps1; 2660 2661 slab_map_pages(cachep, page, freelist); 2662 2663 cache_init_objs(cachep, page); 2664 2665 if (local_flags & __GFP_WAIT) 2666 local_irq_disable(); 2667 check_irq_off(); 2668 spin_lock(&n->list_lock); 2669 2670 /* Make slab active. */ 2671 list_add_tail(&page->lru, &(n->slabs_free)); 2672 STATS_INC_GROWN(cachep); 2673 n->free_objects += cachep->num; 2674 spin_unlock(&n->list_lock); 2675 return 1; 2676 opps1: 2677 kmem_freepages(cachep, page); 2678 failed: 2679 if (local_flags & __GFP_WAIT) 2680 local_irq_disable(); 2681 return 0; 2682 } 2683 2684 #if DEBUG 2685 2686 /* 2687 * Perform extra freeing checks: 2688 * - detect bad pointers. 2689 * - POISON/RED_ZONE checking 2690 */ 2691 static void kfree_debugcheck(const void *objp) 2692 { 2693 if (!virt_addr_valid(objp)) { 2694 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2695 (unsigned long)objp); 2696 BUG(); 2697 } 2698 } 2699 2700 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2701 { 2702 unsigned long long redzone1, redzone2; 2703 2704 redzone1 = *dbg_redzone1(cache, obj); 2705 redzone2 = *dbg_redzone2(cache, obj); 2706 2707 /* 2708 * Redzone is ok. 
2709 */
2710 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2711 return;
2712
2713 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2714 slab_error(cache, "double free detected");
2715 else
2716 slab_error(cache, "memory outside object was overwritten");
2717
2718 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2719 obj, redzone1, redzone2);
2720 }
2721
2722 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2723 unsigned long caller)
2724 {
2725 unsigned int objnr;
2726 struct page *page;
2727
2728 BUG_ON(virt_to_cache(objp) != cachep);
2729
2730 objp -= obj_offset(cachep);
2731 kfree_debugcheck(objp);
2732 page = virt_to_head_page(objp);
2733
2734 if (cachep->flags & SLAB_RED_ZONE) {
2735 verify_redzone_free(cachep, objp);
2736 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2737 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2738 }
2739 if (cachep->flags & SLAB_STORE_USER)
2740 *dbg_userword(cachep, objp) = (void *)caller;
2741
2742 objnr = obj_to_index(cachep, page, objp);
2743
2744 BUG_ON(objnr >= cachep->num);
2745 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2746
2747 set_obj_status(page, objnr, OBJECT_FREE);
2748 if (cachep->flags & SLAB_POISON) {
2749 #ifdef CONFIG_DEBUG_PAGEALLOC
2750 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
2751 store_stackinfo(cachep, objp, caller);
2752 kernel_map_pages(virt_to_page(objp),
2753 cachep->size / PAGE_SIZE, 0);
2754 } else {
2755 poison_obj(cachep, objp, POISON_FREE);
2756 }
2757 #else
2758 poison_obj(cachep, objp, POISON_FREE);
2759 #endif
2760 }
2761 return objp;
2762 }
2763
2764 #else
2765 #define kfree_debugcheck(x) do { } while(0)
2766 #define cache_free_debugcheck(x,objp,z) (objp)
2767 #endif
2768
2769 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2770 bool force_refill)
2771 {
2772 int batchcount;
2773 struct kmem_cache_node *n;
2774 struct array_cache *ac;
2775 int node;
2776
2777 check_irq_off();
2778 node = numa_mem_id();
2779 if (unlikely(force_refill))
2780 goto force_grow;
2781 retry:
2782 ac = cpu_cache_get(cachep);
2783 batchcount = ac->batchcount;
2784 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2785 /*
2786 * If there was little recent activity on this cache, then
2787 * perform only a partial refill. Otherwise we could generate
2788 * refill bouncing.
2789 */
2790 batchcount = BATCHREFILL_LIMIT;
2791 }
2792 n = get_node(cachep, node);
2793
2794 BUG_ON(ac->avail > 0 || !n);
2795 spin_lock(&n->list_lock);
2796
2797 /* See if we can refill from the shared array */
2798 if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2799 n->shared->touched = 1;
2800 goto alloc_done;
2801 }
2802
2803 while (batchcount > 0) {
2804 struct list_head *entry;
2805 struct page *page;
2806 /* Pick the slab the allocation will come from. */
2807 entry = n->slabs_partial.next;
2808 if (entry == &n->slabs_partial) {
2809 n->free_touched = 1;
2810 entry = n->slabs_free.next;
2811 if (entry == &n->slabs_free)
2812 goto must_grow;
2813 }
2814
2815 page = list_entry(entry, struct page, lru);
2816 check_spinlock_acquired(cachep);
2817
2818 /*
2819 * The slab was on either the partial or the free list, so
2820 * there must be at least one object available for
2821 * allocation.
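* (page->active counts the objects already allocated from this slab,
* so it must still be strictly below cachep->num here.)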
2822 */ 2823 BUG_ON(page->active >= cachep->num); 2824 2825 while (page->active < cachep->num && batchcount--) { 2826 STATS_INC_ALLOCED(cachep); 2827 STATS_INC_ACTIVE(cachep); 2828 STATS_SET_HIGH(cachep); 2829 2830 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, 2831 node)); 2832 } 2833 2834 /* move slabp to correct slabp list: */ 2835 list_del(&page->lru); 2836 if (page->active == cachep->num) 2837 list_add(&page->lru, &n->slabs_full); 2838 else 2839 list_add(&page->lru, &n->slabs_partial); 2840 } 2841 2842 must_grow: 2843 n->free_objects -= ac->avail; 2844 alloc_done: 2845 spin_unlock(&n->list_lock); 2846 2847 if (unlikely(!ac->avail)) { 2848 int x; 2849 force_grow: 2850 x = cache_grow(cachep, gfp_exact_node(flags), node, NULL); 2851 2852 /* cache_grow can reenable interrupts, then ac could change. */ 2853 ac = cpu_cache_get(cachep); 2854 node = numa_mem_id(); 2855 2856 /* no objects in sight? abort */ 2857 if (!x && (ac->avail == 0 || force_refill)) 2858 return NULL; 2859 2860 if (!ac->avail) /* objects refilled by interrupt? */ 2861 goto retry; 2862 } 2863 ac->touched = 1; 2864 2865 return ac_get_obj(cachep, ac, flags, force_refill); 2866 } 2867 2868 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 2869 gfp_t flags) 2870 { 2871 might_sleep_if(flags & __GFP_WAIT); 2872 #if DEBUG 2873 kmem_flagcheck(cachep, flags); 2874 #endif 2875 } 2876 2877 #if DEBUG 2878 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2879 gfp_t flags, void *objp, unsigned long caller) 2880 { 2881 struct page *page; 2882 2883 if (!objp) 2884 return objp; 2885 if (cachep->flags & SLAB_POISON) { 2886 #ifdef CONFIG_DEBUG_PAGEALLOC 2887 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 2888 kernel_map_pages(virt_to_page(objp), 2889 cachep->size / PAGE_SIZE, 1); 2890 else 2891 check_poison_obj(cachep, objp); 2892 #else 2893 check_poison_obj(cachep, objp); 2894 #endif 2895 poison_obj(cachep, objp, POISON_INUSE); 2896 } 2897 if (cachep->flags & SLAB_STORE_USER) 2898 *dbg_userword(cachep, objp) = (void *)caller; 2899 2900 if (cachep->flags & SLAB_RED_ZONE) { 2901 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 2902 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2903 slab_error(cachep, "double free, or memory outside" 2904 " object was overwritten"); 2905 printk(KERN_ERR 2906 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 2907 objp, *dbg_redzone1(cachep, objp), 2908 *dbg_redzone2(cachep, objp)); 2909 } 2910 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2911 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2912 } 2913 2914 page = virt_to_head_page(objp); 2915 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); 2916 objp += obj_offset(cachep); 2917 if (cachep->ctor && cachep->flags & SLAB_POISON) 2918 cachep->ctor(objp); 2919 if (ARCH_SLAB_MINALIGN && 2920 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 2921 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 2922 objp, (int)ARCH_SLAB_MINALIGN); 2923 } 2924 return objp; 2925 } 2926 #else 2927 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2928 #endif 2929 2930 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) 2931 { 2932 if (unlikely(cachep == kmem_cache)) 2933 return false; 2934 2935 return should_failslab(cachep->object_size, flags, cachep->flags); 2936 } 2937 2938 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 2939 { 2940 void *objp; 2941 struct array_cache *ac; 2942 bool force_refill = false; 2943 2944 check_irq_off(); 2945 2946 ac = 
cpu_cache_get(cachep);
2947 if (likely(ac->avail)) {
2948 ac->touched = 1;
2949 objp = ac_get_obj(cachep, ac, flags, false);
2950
2951 /*
2952 * Allow for the possibility that none of the available
2953 * objects is permitted by the current flags
2954 */
2955 if (objp) {
2956 STATS_INC_ALLOCHIT(cachep);
2957 goto out;
2958 }
2959 force_refill = true;
2960 }
2961
2962 STATS_INC_ALLOCMISS(cachep);
2963 objp = cache_alloc_refill(cachep, flags, force_refill);
2964 /*
2965 * the 'ac' may be updated by cache_alloc_refill(),
2966 * and kmemleak_erase() requires its correct value.
2967 */
2968 ac = cpu_cache_get(cachep);
2969
2970 out:
2971 /*
2972 * To avoid a false negative, if an object that is in one of the
2973 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
2974 * treat the array pointers as a reference to the object.
2975 */
2976 if (objp)
2977 kmemleak_erase(&ac->entry[ac->avail]);
2978 return objp;
2979 }
2980
2981 #ifdef CONFIG_NUMA
2982 /*
2983 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
2984 *
2985 * If we are in_interrupt, then process context, including cpusets and
2986 * mempolicy, may not apply and should not be used for allocation policy.
2987 */
2988 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2989 {
2990 int nid_alloc, nid_here;
2991
2992 if (in_interrupt() || (flags & __GFP_THISNODE))
2993 return NULL;
2994 nid_alloc = nid_here = numa_mem_id();
2995 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2996 nid_alloc = cpuset_slab_spread_node();
2997 else if (current->mempolicy)
2998 nid_alloc = mempolicy_slab_node();
2999 if (nid_alloc != nid_here)
3000 return ____cache_alloc_node(cachep, flags, nid_alloc);
3001 return NULL;
3002 }
3003
3004 /*
3005 * Fallback function if there was no memory available and no objects on a
3006 * certain node and falling back is permitted. First we scan all the
3007 * available nodes for available objects. If that fails then we
3008 * perform an allocation without specifying a node. This allows the page
3009 * allocator to do its reclaim / fallback magic. We then insert the
3010 * slab into the proper nodelist and then allocate from it.
3011 */
3012 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3013 {
3014 struct zonelist *zonelist;
3015 gfp_t local_flags;
3016 struct zoneref *z;
3017 struct zone *zone;
3018 enum zone_type high_zoneidx = gfp_zone(flags);
3019 void *obj = NULL;
3020 int nid;
3021 unsigned int cpuset_mems_cookie;
3022
3023 if (flags & __GFP_THISNODE)
3024 return NULL;
3025
3026 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3027
3028 retry_cpuset:
3029 cpuset_mems_cookie = read_mems_allowed_begin();
3030 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3031
3032 retry:
3033 /*
3034 * Look through allowed nodes for objects available
3035 * from existing per node queues.
3036 */
3037 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3038 nid = zone_to_nid(zone);
3039
3040 if (cpuset_zone_allowed(zone, flags) &&
3041 get_node(cache, nid) &&
3042 get_node(cache, nid)->free_objects) {
3043 obj = ____cache_alloc_node(cache,
3044 gfp_exact_node(flags), nid);
3045 if (obj)
3046 break;
3047 }
3048 }
3049
3050 if (!obj) {
3051 /*
3052 * This allocation will be performed within the constraints
3053 * of the current cpuset / memory policy requirements.
3054 * We may trigger various forms of reclaim on the allowed
3055 * set and go into memory reserves if necessary.
3056 */
3057 struct page *page;
3058
3059 if (local_flags & __GFP_WAIT)
3060 local_irq_enable();
3061 kmem_flagcheck(cache, flags);
3062 page = kmem_getpages(cache, local_flags, numa_mem_id());
3063 if (local_flags & __GFP_WAIT)
3064 local_irq_disable();
3065 if (page) {
3066 /*
3067 * Insert into the appropriate per-node queues
3068 */
3069 nid = page_to_nid(page);
3070 if (cache_grow(cache, flags, nid, page)) {
3071 obj = ____cache_alloc_node(cache,
3072 gfp_exact_node(flags), nid);
3073 if (!obj)
3074 /*
3075 * Another processor may allocate the
3076 * objects in the slab since we are
3077 * not holding any locks.
3078 */
3079 goto retry;
3080 } else {
3081 /* cache_grow() already freed the page */
3082 obj = NULL;
3083 }
3084 }
3085 }
3086
3087 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3088 goto retry_cpuset;
3089 return obj;
3090 }
3091
3092 /*
3093 * An interface to enable slab creation on nodeid
3094 */
3095 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3096 int nodeid)
3097 {
3098 struct list_head *entry;
3099 struct page *page;
3100 struct kmem_cache_node *n;
3101 void *obj;
3102 int x;
3103
3104 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3105 n = get_node(cachep, nodeid);
3106 BUG_ON(!n);
3107
3108 retry:
3109 check_irq_off();
3110 spin_lock(&n->list_lock);
3111 entry = n->slabs_partial.next;
3112 if (entry == &n->slabs_partial) {
3113 n->free_touched = 1;
3114 entry = n->slabs_free.next;
3115 if (entry == &n->slabs_free)
3116 goto must_grow;
3117 }
3118
3119 page = list_entry(entry, struct page, lru);
3120 check_spinlock_acquired_node(cachep, nodeid);
3121
3122 STATS_INC_NODEALLOCS(cachep);
3123 STATS_INC_ACTIVE(cachep);
3124 STATS_SET_HIGH(cachep);
3125
3126 BUG_ON(page->active == cachep->num);
3127
3128 obj = slab_get_obj(cachep, page, nodeid);
3129 n->free_objects--;
3130 /* move the slab to the correct slab list: */
3131 list_del(&page->lru);
3132
3133 if (page->active == cachep->num)
3134 list_add(&page->lru, &n->slabs_full);
3135 else
3136 list_add(&page->lru, &n->slabs_partial);
3137
3138 spin_unlock(&n->list_lock);
3139 goto done;
3140
3141 must_grow:
3142 spin_unlock(&n->list_lock);
3143 x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
3144 if (x)
3145 goto retry;
3146
3147 return fallback_alloc(cachep, flags);
3148
3149 done:
3150 return obj;
3151 }
3152
3153 static __always_inline void *
3154 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3155 unsigned long caller)
3156 {
3157 unsigned long save_flags;
3158 void *ptr;
3159 int slab_node = numa_mem_id();
3160
3161 flags &= gfp_allowed_mask;
3162
3163 lockdep_trace_alloc(flags);
3164
3165 if (slab_should_failslab(cachep, flags))
3166 return NULL;
3167
3168 cachep = memcg_kmem_get_cache(cachep, flags);
3169
3170 cache_alloc_debugcheck_before(cachep, flags);
3171 local_irq_save(save_flags);
3172
3173 if (nodeid == NUMA_NO_NODE)
3174 nodeid = slab_node;
3175
3176 if (unlikely(!get_node(cachep, nodeid))) {
3177 /* Node not bootstrapped yet */
3178 ptr = fallback_alloc(cachep, flags);
3179 goto out;
3180 }
3181
3182 if (nodeid == slab_node) {
3183 /*
3184 * Use the locally cached objects if possible.
3185 * However ____cache_alloc does not allow fallback
3186 * to other nodes. It may fail while we still have
3187 * objects on other nodes available.
3188 */ 3189 ptr = ____cache_alloc(cachep, flags); 3190 if (ptr) 3191 goto out; 3192 } 3193 /* ___cache_alloc_node can fall back to other nodes */ 3194 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3195 out: 3196 local_irq_restore(save_flags); 3197 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3198 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, 3199 flags); 3200 3201 if (likely(ptr)) { 3202 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); 3203 if (unlikely(flags & __GFP_ZERO)) 3204 memset(ptr, 0, cachep->object_size); 3205 } 3206 3207 memcg_kmem_put_cache(cachep); 3208 return ptr; 3209 } 3210 3211 static __always_inline void * 3212 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3213 { 3214 void *objp; 3215 3216 if (current->mempolicy || cpuset_do_slab_mem_spread()) { 3217 objp = alternate_node_alloc(cache, flags); 3218 if (objp) 3219 goto out; 3220 } 3221 objp = ____cache_alloc(cache, flags); 3222 3223 /* 3224 * We may just have run out of memory on the local node. 3225 * ____cache_alloc_node() knows how to locate memory on other nodes 3226 */ 3227 if (!objp) 3228 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); 3229 3230 out: 3231 return objp; 3232 } 3233 #else 3234 3235 static __always_inline void * 3236 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3237 { 3238 return ____cache_alloc(cachep, flags); 3239 } 3240 3241 #endif /* CONFIG_NUMA */ 3242 3243 static __always_inline void * 3244 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3245 { 3246 unsigned long save_flags; 3247 void *objp; 3248 3249 flags &= gfp_allowed_mask; 3250 3251 lockdep_trace_alloc(flags); 3252 3253 if (slab_should_failslab(cachep, flags)) 3254 return NULL; 3255 3256 cachep = memcg_kmem_get_cache(cachep, flags); 3257 3258 cache_alloc_debugcheck_before(cachep, flags); 3259 local_irq_save(save_flags); 3260 objp = __do_cache_alloc(cachep, flags); 3261 local_irq_restore(save_flags); 3262 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3263 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, 3264 flags); 3265 prefetchw(objp); 3266 3267 if (likely(objp)) { 3268 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); 3269 if (unlikely(flags & __GFP_ZERO)) 3270 memset(objp, 0, cachep->object_size); 3271 } 3272 3273 memcg_kmem_put_cache(cachep); 3274 return objp; 3275 } 3276 3277 /* 3278 * Caller needs to acquire correct kmem_cache_node's list_lock 3279 * @list: List of detached free slabs should be freed by caller 3280 */ 3281 static void free_block(struct kmem_cache *cachep, void **objpp, 3282 int nr_objects, int node, struct list_head *list) 3283 { 3284 int i; 3285 struct kmem_cache_node *n = get_node(cachep, node); 3286 3287 for (i = 0; i < nr_objects; i++) { 3288 void *objp; 3289 struct page *page; 3290 3291 clear_obj_pfmemalloc(&objpp[i]); 3292 objp = objpp[i]; 3293 3294 page = virt_to_head_page(objp); 3295 list_del(&page->lru); 3296 check_spinlock_acquired_node(cachep, node); 3297 slab_put_obj(cachep, page, objp, node); 3298 STATS_DEC_ACTIVE(cachep); 3299 n->free_objects++; 3300 3301 /* fixup slab chains */ 3302 if (page->active == 0) { 3303 if (n->free_objects > n->free_limit) { 3304 n->free_objects -= cachep->num; 3305 list_add_tail(&page->lru, list); 3306 } else { 3307 list_add(&page->lru, &n->slabs_free); 3308 } 3309 } else { 3310 /* Unconditionally move a slab to the end of the 3311 * partial list on free - maximum time for the 3312 * other objects to be freed, 
too.
3313 */
3314 list_add_tail(&page->lru, &n->slabs_partial);
3315 }
3316 }
3317 }
3318
3319 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3320 {
3321 int batchcount;
3322 struct kmem_cache_node *n;
3323 int node = numa_mem_id();
3324 LIST_HEAD(list);
3325
3326 batchcount = ac->batchcount;
3327 #if DEBUG
3328 BUG_ON(!batchcount || batchcount > ac->avail);
3329 #endif
3330 check_irq_off();
3331 n = get_node(cachep, node);
3332 spin_lock(&n->list_lock);
3333 if (n->shared) {
3334 struct array_cache *shared_array = n->shared;
3335 int max = shared_array->limit - shared_array->avail;
3336 if (max) {
3337 if (batchcount > max)
3338 batchcount = max;
3339 memcpy(&(shared_array->entry[shared_array->avail]),
3340 ac->entry, sizeof(void *) * batchcount);
3341 shared_array->avail += batchcount;
3342 goto free_done;
3343 }
3344 }
3345
3346 free_block(cachep, ac->entry, batchcount, node, &list);
3347 free_done:
3348 #if STATS
3349 {
3350 int i = 0;
3351 struct list_head *p;
3352
3353 p = n->slabs_free.next;
3354 while (p != &(n->slabs_free)) {
3355 struct page *page;
3356
3357 page = list_entry(p, struct page, lru);
3358 BUG_ON(page->active);
3359
3360 i++;
3361 p = p->next;
3362 }
3363 STATS_SET_FREEABLE(cachep, i);
3364 }
3365 #endif
3366 spin_unlock(&n->list_lock);
3367 slabs_destroy(cachep, &list);
3368 ac->avail -= batchcount;
3369 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3370 }
3371
3372 /*
3373 * Release an obj back to its cache. If the obj has a constructed state, it must
3374 * be in this state _before_ it is released. Called with interrupts disabled.
3375 */
3376 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3377 unsigned long caller)
3378 {
3379 struct array_cache *ac = cpu_cache_get(cachep);
3380
3381 check_irq_off();
3382 kmemleak_free_recursive(objp, cachep->flags);
3383 objp = cache_free_debugcheck(cachep, objp, caller);
3384
3385 kmemcheck_slab_free(cachep, objp, cachep->object_size);
3386
3387 /*
3388 * Skip calling cache_free_alien() when the platform is not NUMA.
3389 * This avoids the cache misses that happen while accessing slabp (which
3390 * is a per-page memory reference) to get the nodeid. Instead use a global
3391 * variable to skip the call, which is most likely to be present in
3392 * the cache.
3393 */
3394 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3395 return;
3396
3397 if (ac->avail < ac->limit) {
3398 STATS_INC_FREEHIT(cachep);
3399 } else {
3400 STATS_INC_FREEMISS(cachep);
3401 cache_flusharray(cachep, ac);
3402 }
3403
3404 ac_put_obj(cachep, ac, objp);
3405 }
3406
3407 /**
3408 * kmem_cache_alloc - Allocate an object
3409 * @cachep: The cache to allocate from.
3410 * @flags: See kmalloc().
3411 *
3412 * Allocate an object from this cache. The flags are only relevant
3413 * if the cache has no available objects.
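*
* A minimal illustrative pairing (a sketch; foo_cache is a hypothetical
* cache created elsewhere with kmem_cache_create()):
*
*	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
*	if (!f)
*		return -ENOMEM;
*	...
*	kmem_cache_free(foo_cache, f);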
3414 */ 3415 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3416 { 3417 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3418 3419 trace_kmem_cache_alloc(_RET_IP_, ret, 3420 cachep->object_size, cachep->size, flags); 3421 3422 return ret; 3423 } 3424 EXPORT_SYMBOL(kmem_cache_alloc); 3425 3426 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3427 { 3428 __kmem_cache_free_bulk(s, size, p); 3429 } 3430 EXPORT_SYMBOL(kmem_cache_free_bulk); 3431 3432 bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3433 void **p) 3434 { 3435 return __kmem_cache_alloc_bulk(s, flags, size, p); 3436 } 3437 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3438 3439 #ifdef CONFIG_TRACING 3440 void * 3441 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3442 { 3443 void *ret; 3444 3445 ret = slab_alloc(cachep, flags, _RET_IP_); 3446 3447 trace_kmalloc(_RET_IP_, ret, 3448 size, cachep->size, flags); 3449 return ret; 3450 } 3451 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3452 #endif 3453 3454 #ifdef CONFIG_NUMA 3455 /** 3456 * kmem_cache_alloc_node - Allocate an object on the specified node 3457 * @cachep: The cache to allocate from. 3458 * @flags: See kmalloc(). 3459 * @nodeid: node number of the target node. 3460 * 3461 * Identical to kmem_cache_alloc but it will allocate memory on the given 3462 * node, which can improve the performance for cpu bound structures. 3463 * 3464 * Fallback to other node is possible if __GFP_THISNODE is not set. 3465 */ 3466 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3467 { 3468 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3469 3470 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3471 cachep->object_size, cachep->size, 3472 flags, nodeid); 3473 3474 return ret; 3475 } 3476 EXPORT_SYMBOL(kmem_cache_alloc_node); 3477 3478 #ifdef CONFIG_TRACING 3479 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3480 gfp_t flags, 3481 int nodeid, 3482 size_t size) 3483 { 3484 void *ret; 3485 3486 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3487 3488 trace_kmalloc_node(_RET_IP_, ret, 3489 size, cachep->size, 3490 flags, nodeid); 3491 return ret; 3492 } 3493 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3494 #endif 3495 3496 static __always_inline void * 3497 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3498 { 3499 struct kmem_cache *cachep; 3500 3501 cachep = kmalloc_slab(size, flags); 3502 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3503 return cachep; 3504 return kmem_cache_alloc_node_trace(cachep, flags, node, size); 3505 } 3506 3507 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3508 { 3509 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3510 } 3511 EXPORT_SYMBOL(__kmalloc_node); 3512 3513 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3514 int node, unsigned long caller) 3515 { 3516 return __do_kmalloc_node(size, flags, node, caller); 3517 } 3518 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3519 #endif /* CONFIG_NUMA */ 3520 3521 /** 3522 * __do_kmalloc - allocate memory 3523 * @size: how many bytes of memory are required. 3524 * @flags: the type of memory to allocate (see kmalloc). 
3525 * @caller: function caller for debug tracking of the caller 3526 */ 3527 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3528 unsigned long caller) 3529 { 3530 struct kmem_cache *cachep; 3531 void *ret; 3532 3533 cachep = kmalloc_slab(size, flags); 3534 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3535 return cachep; 3536 ret = slab_alloc(cachep, flags, caller); 3537 3538 trace_kmalloc(caller, ret, 3539 size, cachep->size, flags); 3540 3541 return ret; 3542 } 3543 3544 void *__kmalloc(size_t size, gfp_t flags) 3545 { 3546 return __do_kmalloc(size, flags, _RET_IP_); 3547 } 3548 EXPORT_SYMBOL(__kmalloc); 3549 3550 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3551 { 3552 return __do_kmalloc(size, flags, caller); 3553 } 3554 EXPORT_SYMBOL(__kmalloc_track_caller); 3555 3556 /** 3557 * kmem_cache_free - Deallocate an object 3558 * @cachep: The cache the allocation was from. 3559 * @objp: The previously allocated object. 3560 * 3561 * Free an object which was previously allocated from this 3562 * cache. 3563 */ 3564 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3565 { 3566 unsigned long flags; 3567 cachep = cache_from_obj(cachep, objp); 3568 if (!cachep) 3569 return; 3570 3571 local_irq_save(flags); 3572 debug_check_no_locks_freed(objp, cachep->object_size); 3573 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3574 debug_check_no_obj_freed(objp, cachep->object_size); 3575 __cache_free(cachep, objp, _RET_IP_); 3576 local_irq_restore(flags); 3577 3578 trace_kmem_cache_free(_RET_IP_, objp); 3579 } 3580 EXPORT_SYMBOL(kmem_cache_free); 3581 3582 /** 3583 * kfree - free previously allocated memory 3584 * @objp: pointer returned by kmalloc. 3585 * 3586 * If @objp is NULL, no operation is performed. 3587 * 3588 * Don't free memory not originally allocated by kmalloc() 3589 * or you will run into trouble. 3590 */ 3591 void kfree(const void *objp) 3592 { 3593 struct kmem_cache *c; 3594 unsigned long flags; 3595 3596 trace_kfree(_RET_IP_, objp); 3597 3598 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3599 return; 3600 local_irq_save(flags); 3601 kfree_debugcheck(objp); 3602 c = virt_to_cache(objp); 3603 debug_check_no_locks_freed(objp, c->object_size); 3604 3605 debug_check_no_obj_freed(objp, c->object_size); 3606 __cache_free(c, (void *)objp, _RET_IP_); 3607 local_irq_restore(flags); 3608 } 3609 EXPORT_SYMBOL(kfree); 3610 3611 /* 3612 * This initializes kmem_cache_node or resizes various caches for all nodes. 
3613 */ 3614 static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) 3615 { 3616 int node; 3617 struct kmem_cache_node *n; 3618 struct array_cache *new_shared; 3619 struct alien_cache **new_alien = NULL; 3620 3621 for_each_online_node(node) { 3622 3623 if (use_alien_caches) { 3624 new_alien = alloc_alien_cache(node, cachep->limit, gfp); 3625 if (!new_alien) 3626 goto fail; 3627 } 3628 3629 new_shared = NULL; 3630 if (cachep->shared) { 3631 new_shared = alloc_arraycache(node, 3632 cachep->shared*cachep->batchcount, 3633 0xbaadf00d, gfp); 3634 if (!new_shared) { 3635 free_alien_cache(new_alien); 3636 goto fail; 3637 } 3638 } 3639 3640 n = get_node(cachep, node); 3641 if (n) { 3642 struct array_cache *shared = n->shared; 3643 LIST_HEAD(list); 3644 3645 spin_lock_irq(&n->list_lock); 3646 3647 if (shared) 3648 free_block(cachep, shared->entry, 3649 shared->avail, node, &list); 3650 3651 n->shared = new_shared; 3652 if (!n->alien) { 3653 n->alien = new_alien; 3654 new_alien = NULL; 3655 } 3656 n->free_limit = (1 + nr_cpus_node(node)) * 3657 cachep->batchcount + cachep->num; 3658 spin_unlock_irq(&n->list_lock); 3659 slabs_destroy(cachep, &list); 3660 kfree(shared); 3661 free_alien_cache(new_alien); 3662 continue; 3663 } 3664 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); 3665 if (!n) { 3666 free_alien_cache(new_alien); 3667 kfree(new_shared); 3668 goto fail; 3669 } 3670 3671 kmem_cache_node_init(n); 3672 n->next_reap = jiffies + REAPTIMEOUT_NODE + 3673 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 3674 n->shared = new_shared; 3675 n->alien = new_alien; 3676 n->free_limit = (1 + nr_cpus_node(node)) * 3677 cachep->batchcount + cachep->num; 3678 cachep->node[node] = n; 3679 } 3680 return 0; 3681 3682 fail: 3683 if (!cachep->list.next) { 3684 /* Cache is not active yet. 
Roll back what we did */ 3685 node--; 3686 while (node >= 0) { 3687 n = get_node(cachep, node); 3688 if (n) { 3689 kfree(n->shared); 3690 free_alien_cache(n->alien); 3691 kfree(n); 3692 cachep->node[node] = NULL; 3693 } 3694 node--; 3695 } 3696 } 3697 return -ENOMEM; 3698 } 3699 3700 /* Always called with the slab_mutex held */ 3701 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, 3702 int batchcount, int shared, gfp_t gfp) 3703 { 3704 struct array_cache __percpu *cpu_cache, *prev; 3705 int cpu; 3706 3707 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); 3708 if (!cpu_cache) 3709 return -ENOMEM; 3710 3711 prev = cachep->cpu_cache; 3712 cachep->cpu_cache = cpu_cache; 3713 kick_all_cpus_sync(); 3714 3715 check_irq_on(); 3716 cachep->batchcount = batchcount; 3717 cachep->limit = limit; 3718 cachep->shared = shared; 3719 3720 if (!prev) 3721 goto alloc_node; 3722 3723 for_each_online_cpu(cpu) { 3724 LIST_HEAD(list); 3725 int node; 3726 struct kmem_cache_node *n; 3727 struct array_cache *ac = per_cpu_ptr(prev, cpu); 3728 3729 node = cpu_to_mem(cpu); 3730 n = get_node(cachep, node); 3731 spin_lock_irq(&n->list_lock); 3732 free_block(cachep, ac->entry, ac->avail, node, &list); 3733 spin_unlock_irq(&n->list_lock); 3734 slabs_destroy(cachep, &list); 3735 } 3736 free_percpu(prev); 3737 3738 alloc_node: 3739 return alloc_kmem_cache_node(cachep, gfp); 3740 } 3741 3742 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3743 int batchcount, int shared, gfp_t gfp) 3744 { 3745 int ret; 3746 struct kmem_cache *c; 3747 3748 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3749 3750 if (slab_state < FULL) 3751 return ret; 3752 3753 if ((ret < 0) || !is_root_cache(cachep)) 3754 return ret; 3755 3756 lockdep_assert_held(&slab_mutex); 3757 for_each_memcg_cache(c, cachep) { 3758 /* return value determined by the root cache only */ 3759 __do_tune_cpucache(c, limit, batchcount, shared, gfp); 3760 } 3761 3762 return ret; 3763 } 3764 3765 /* Called with slab_mutex held always */ 3766 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3767 { 3768 int err; 3769 int limit = 0; 3770 int shared = 0; 3771 int batchcount = 0; 3772 3773 if (!is_root_cache(cachep)) { 3774 struct kmem_cache *root = memcg_root_cache(cachep); 3775 limit = root->limit; 3776 shared = root->shared; 3777 batchcount = root->batchcount; 3778 } 3779 3780 if (limit && shared && batchcount) 3781 goto skip_setup; 3782 /* 3783 * The head array serves three purposes: 3784 * - create a LIFO ordering, i.e. return objects that are cache-warm 3785 * - reduce the number of spinlock operations. 3786 * - reduce the number of linked list operations on the slab and 3787 * bufctl chains: array operations are cheaper. 3788 * The numbers are guessed, we should auto-tune as described by 3789 * Bonwick. 3790 */ 3791 if (cachep->size > 131072) 3792 limit = 1; 3793 else if (cachep->size > PAGE_SIZE) 3794 limit = 8; 3795 else if (cachep->size > 1024) 3796 limit = 24; 3797 else if (cachep->size > 256) 3798 limit = 54; 3799 else 3800 limit = 120; 3801 3802 /* 3803 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3804 * allocation behaviour: Most allocs on one cpu, most free operations 3805 * on another cpu. For these cases, an efficient object passing between 3806 * cpus is necessary. This is provided by a shared array. The array 3807 * replaces Bonwick's magazine layer. 3808 * On uniprocessor, it's functionally equivalent (but less efficient) 3809 * to a larger limit. 
Thus disabled by default.
3810 */
3811 shared = 0;
3812 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3813 shared = 8;
3814
3815 #if DEBUG
3816 /*
3817 * With debugging enabled, a large batchcount leads to excessively long
3818 * periods with disabled local interrupts. Limit the batchcount.
3819 */
3820 if (limit > 32)
3821 limit = 32;
3822 #endif
3823 batchcount = (limit + 1) / 2;
3824 skip_setup:
3825 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3826 if (err)
3827 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3828 cachep->name, -err);
3829 return err;
3830 }
3831
3832 /*
3833 * Drain an array if it contains any elements, taking the node lock only if
3834 * necessary. Note that the node listlock also protects the array_cache
3835 * if drain_array() is used on the shared array.
3836 */
3837 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3838 struct array_cache *ac, int force, int node)
3839 {
3840 LIST_HEAD(list);
3841 int tofree;
3842
3843 if (!ac || !ac->avail)
3844 return;
3845 if (ac->touched && !force) {
3846 ac->touched = 0;
3847 } else {
3848 spin_lock_irq(&n->list_lock);
3849 if (ac->avail) {
3850 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3851 if (tofree > ac->avail)
3852 tofree = (ac->avail + 1) / 2;
3853 free_block(cachep, ac->entry, tofree, node, &list);
3854 ac->avail -= tofree;
3855 memmove(ac->entry, &(ac->entry[tofree]),
3856 sizeof(void *) * ac->avail);
3857 }
3858 spin_unlock_irq(&n->list_lock);
3859 slabs_destroy(cachep, &list);
3860 }
3861 }
3862
3863 /**
3864 * cache_reap - Reclaim memory from caches.
3865 * @w: work descriptor
3866 *
3867 * Called from workqueue/eventd every few seconds.
3868 * Purpose:
3869 * - clear the per-cpu caches for this CPU.
3870 * - return freeable pages to the main free memory pool.
3871 *
3872 * If we cannot acquire the cache chain mutex then just give up - we'll try
3873 * again on the next iteration.
3874 */
3875 static void cache_reap(struct work_struct *w)
3876 {
3877 struct kmem_cache *searchp;
3878 struct kmem_cache_node *n;
3879 int node = numa_mem_id();
3880 struct delayed_work *work = to_delayed_work(w);
3881
3882 if (!mutex_trylock(&slab_mutex))
3883 /* Give up. Set up the next iteration. */
3884 goto out;
3885
3886 list_for_each_entry(searchp, &slab_caches, list) {
3887 check_irq_on();
3888
3889 /*
3890 * We only take the node lock if absolutely necessary and we
3891 * have established with reasonable certainty that
3892 * we can do some work if the lock was obtained.
3893 */
3894 n = get_node(searchp, node);
3895
3896 reap_alien(searchp, n);
3897
3898 drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
3899
3900 /*
3901 * These are racy checks but it does not matter
3902 * if we skip one check or scan twice.
3903 */ 3904 if (time_after(n->next_reap, jiffies)) 3905 goto next; 3906 3907 n->next_reap = jiffies + REAPTIMEOUT_NODE; 3908 3909 drain_array(searchp, n, n->shared, 0, node); 3910 3911 if (n->free_touched) 3912 n->free_touched = 0; 3913 else { 3914 int freed; 3915 3916 freed = drain_freelist(searchp, n, (n->free_limit + 3917 5 * searchp->num - 1) / (5 * searchp->num)); 3918 STATS_ADD_REAPED(searchp, freed); 3919 } 3920 next: 3921 cond_resched(); 3922 } 3923 check_irq_on(); 3924 mutex_unlock(&slab_mutex); 3925 next_reap_node(); 3926 out: 3927 /* Set up the next iteration */ 3928 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); 3929 } 3930 3931 #ifdef CONFIG_SLABINFO 3932 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) 3933 { 3934 struct page *page; 3935 unsigned long active_objs; 3936 unsigned long num_objs; 3937 unsigned long active_slabs = 0; 3938 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 3939 const char *name; 3940 char *error = NULL; 3941 int node; 3942 struct kmem_cache_node *n; 3943 3944 active_objs = 0; 3945 num_slabs = 0; 3946 for_each_kmem_cache_node(cachep, node, n) { 3947 3948 check_irq_on(); 3949 spin_lock_irq(&n->list_lock); 3950 3951 list_for_each_entry(page, &n->slabs_full, lru) { 3952 if (page->active != cachep->num && !error) 3953 error = "slabs_full accounting error"; 3954 active_objs += cachep->num; 3955 active_slabs++; 3956 } 3957 list_for_each_entry(page, &n->slabs_partial, lru) { 3958 if (page->active == cachep->num && !error) 3959 error = "slabs_partial accounting error"; 3960 if (!page->active && !error) 3961 error = "slabs_partial accounting error"; 3962 active_objs += page->active; 3963 active_slabs++; 3964 } 3965 list_for_each_entry(page, &n->slabs_free, lru) { 3966 if (page->active && !error) 3967 error = "slabs_free accounting error"; 3968 num_slabs++; 3969 } 3970 free_objects += n->free_objects; 3971 if (n->shared) 3972 shared_avail += n->shared->avail; 3973 3974 spin_unlock_irq(&n->list_lock); 3975 } 3976 num_slabs += active_slabs; 3977 num_objs = num_slabs * cachep->num; 3978 if (num_objs - active_objs != free_objects && !error) 3979 error = "free_objects accounting error"; 3980 3981 name = cachep->name; 3982 if (error) 3983 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 3984 3985 sinfo->active_objs = active_objs; 3986 sinfo->num_objs = num_objs; 3987 sinfo->active_slabs = active_slabs; 3988 sinfo->num_slabs = num_slabs; 3989 sinfo->shared_avail = shared_avail; 3990 sinfo->limit = cachep->limit; 3991 sinfo->batchcount = cachep->batchcount; 3992 sinfo->shared = cachep->shared; 3993 sinfo->objects_per_slab = cachep->num; 3994 sinfo->cache_order = cachep->gfporder; 3995 } 3996 3997 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) 3998 { 3999 #if STATS 4000 { /* node stats */ 4001 unsigned long high = cachep->high_mark; 4002 unsigned long allocs = cachep->num_allocations; 4003 unsigned long grown = cachep->grown; 4004 unsigned long reaped = cachep->reaped; 4005 unsigned long errors = cachep->errors; 4006 unsigned long max_freeable = cachep->max_freeable; 4007 unsigned long node_allocs = cachep->node_allocs; 4008 unsigned long node_frees = cachep->node_frees; 4009 unsigned long overflows = cachep->node_overflow; 4010 4011 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu " 4012 "%4lu %4lu %4lu %4lu %4lu", 4013 allocs, high, grown, 4014 reaped, errors, max_freeable, node_allocs, 4015 node_frees, overflows); 4016 } 4017 /* cpu stats */ 4018 { 4019 unsigned long allochit = 
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
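/*
 * Illustrative usage sketch (hypothetical userspace code, values
 * arbitrary): the line written must be
 * "<cache name> <limit> <batchcount> <shared>" to satisfy the strchr()
 * and sscanf() parsing above. Note that values failing the sanity
 * checks are silently ignored: the write still reports success, but no
 * tuning takes place.
 */
#if 0
static int tune_dentry_cache(void)
{
	static const char line[] = "dentry 120 60 8\n";
	int fd = open("/proc/slabinfo", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, line, sizeof(line) - 1);
	close(fd);
	return 0;
}
#endif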
#ifdef CONFIG_DEBUG_SLAB_LEAK

/*
 * The caller table is a flat array: n[0] holds its capacity in
 * (address, count) pairs, n[1] the number of pairs in use, and the
 * pairs themselves start at n[2], kept sorted by caller address so
 * an existing entry can be found by binary search.
 */
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	/* shift the tail up by one pair and insert the new caller */
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
			struct page *page)
{
	void *p;
	int i;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		if (get_obj_status(page, i) != OBJECT_ACTIVE)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
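/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * because kmalloc() may round a request up to the next cache size,
 * ksize() lets a caller use everything it actually received before
 * paying for a reallocation. buf must be a live kmalloc()'ed object.
 */
#if 0
static char *grow_buffer(char *buf, size_t need)
{
	/* the allocation may already be big enough after rounding */
	if (need <= ksize(buf))
		return buf;
	return krealloc(buf, need, GFP_KERNEL);
}
#endif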