/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise they come from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray must not be read with local interrupts enabled -
 * it is changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain,
 *	which can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89 #include <linux/slab.h> 90 #include <linux/mm.h> 91 #include <linux/poison.h> 92 #include <linux/swap.h> 93 #include <linux/cache.h> 94 #include <linux/interrupt.h> 95 #include <linux/init.h> 96 #include <linux/compiler.h> 97 #include <linux/cpuset.h> 98 #include <linux/proc_fs.h> 99 #include <linux/seq_file.h> 100 #include <linux/notifier.h> 101 #include <linux/kallsyms.h> 102 #include <linux/cpu.h> 103 #include <linux/sysctl.h> 104 #include <linux/module.h> 105 #include <linux/rcupdate.h> 106 #include <linux/string.h> 107 #include <linux/uaccess.h> 108 #include <linux/nodemask.h> 109 #include <linux/kmemleak.h> 110 #include <linux/mempolicy.h> 111 #include <linux/mutex.h> 112 #include <linux/fault-inject.h> 113 #include <linux/rtmutex.h> 114 #include <linux/reciprocal_div.h> 115 #include <linux/debugobjects.h> 116 #include <linux/kmemcheck.h> 117 #include <linux/memory.h> 118 #include <linux/prefetch.h> 119 120 #include <net/sock.h> 121 122 #include <asm/cacheflush.h> 123 #include <asm/tlbflush.h> 124 #include <asm/page.h> 125 126 #include <trace/events/kmem.h> 127 128 #include "internal.h" 129 130 #include "slab.h" 131 132 /* 133 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 134 * 0 for faster, smaller code (especially in the critical paths). 135 * 136 * STATS - 1 to collect stats for /proc/slabinfo. 137 * 0 for faster, smaller code (especially in the critical paths). 138 * 139 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 140 */ 141 142 #ifdef CONFIG_DEBUG_SLAB 143 #define DEBUG 1 144 #define STATS 1 145 #define FORCED_DEBUG 1 146 #else 147 #define DEBUG 0 148 #define STATS 0 149 #define FORCED_DEBUG 0 150 #endif 151 152 /* Shouldn't this be in a header file somewhere? */ 153 #define BYTES_PER_WORD sizeof(void *) 154 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) 155 156 #ifndef ARCH_KMALLOC_FLAGS 157 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 158 #endif 159 160 #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ 161 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) 162 163 #if FREELIST_BYTE_INDEX 164 typedef unsigned char freelist_idx_t; 165 #else 166 typedef unsigned short freelist_idx_t; 167 #endif 168 169 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) 170 171 /* 172 * true if a page was allocated from pfmemalloc reserves for network-based 173 * swap 174 */ 175 static bool pfmemalloc_active __read_mostly; 176 177 /* 178 * struct array_cache 179 * 180 * Purpose: 181 * - LIFO ordering, to hand out cache-warm objects from _alloc 182 * - reduce the number of linked list operations 183 * - reduce spinlock operations 184 * 185 * The limit is stored in the per-cpu structure to reduce the data cache 186 * footprint. 187 * 188 */ 189 struct array_cache { 190 unsigned int avail; 191 unsigned int limit; 192 unsigned int batchcount; 193 unsigned int touched; 194 void *entry[]; /* 195 * Must have this definition in here for the proper 196 * alignment of array_cache. Also simplifies accessing 197 * the entries. 
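			 *
			 * As a minimal illustrative sketch (ignoring the
			 * pfmemalloc tagging handled further down), the
			 * array is used as a LIFO stack indexed by @avail:
			 *
			 *	push:	ac->entry[ac->avail++] = objp;
			 *	pop:	objp = ac->entry[--ac->avail];
			 *
			 * so the most recently freed, and therefore
			 * cache-warm, object is handed out first (see
			 * ac_get_obj()/ac_put_obj()).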
198 * 199 * Entries should not be directly dereferenced as 200 * entries belonging to slabs marked pfmemalloc will 201 * have the lower bits set SLAB_OBJ_PFMEMALLOC 202 */ 203 }; 204 205 struct alien_cache { 206 spinlock_t lock; 207 struct array_cache ac; 208 }; 209 210 #define SLAB_OBJ_PFMEMALLOC 1 211 static inline bool is_obj_pfmemalloc(void *objp) 212 { 213 return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC; 214 } 215 216 static inline void set_obj_pfmemalloc(void **objp) 217 { 218 *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC); 219 return; 220 } 221 222 static inline void clear_obj_pfmemalloc(void **objp) 223 { 224 *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC); 225 } 226 227 /* 228 * bootstrap: The caches do not work without cpuarrays anymore, but the 229 * cpuarrays are allocated from the generic caches... 230 */ 231 #define BOOT_CPUCACHE_ENTRIES 1 232 struct arraycache_init { 233 struct array_cache cache; 234 void *entries[BOOT_CPUCACHE_ENTRIES]; 235 }; 236 237 /* 238 * Need this for bootstrapping a per node allocator. 239 */ 240 #define NUM_INIT_LISTS (2 * MAX_NUMNODES) 241 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; 242 #define CACHE_CACHE 0 243 #define SIZE_NODE (MAX_NUMNODES) 244 245 static int drain_freelist(struct kmem_cache *cache, 246 struct kmem_cache_node *n, int tofree); 247 static void free_block(struct kmem_cache *cachep, void **objpp, int len, 248 int node, struct list_head *list); 249 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list); 250 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); 251 static void cache_reap(struct work_struct *unused); 252 253 static int slab_early_init = 1; 254 255 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) 256 257 static void kmem_cache_node_init(struct kmem_cache_node *parent) 258 { 259 INIT_LIST_HEAD(&parent->slabs_full); 260 INIT_LIST_HEAD(&parent->slabs_partial); 261 INIT_LIST_HEAD(&parent->slabs_free); 262 parent->shared = NULL; 263 parent->alien = NULL; 264 parent->colour_next = 0; 265 spin_lock_init(&parent->list_lock); 266 parent->free_objects = 0; 267 parent->free_touched = 0; 268 } 269 270 #define MAKE_LIST(cachep, listp, slab, nodeid) \ 271 do { \ 272 INIT_LIST_HEAD(listp); \ 273 list_splice(&get_node(cachep, nodeid)->slab, listp); \ 274 } while (0) 275 276 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 277 do { \ 278 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 279 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 280 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 281 } while (0) 282 283 #define CFLGS_OFF_SLAB (0x80000000UL) 284 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 285 286 #define BATCHREFILL_LIMIT 16 287 /* 288 * Optimization question: fewer reaps means less probability for unnessary 289 * cpucache drain/refill cycles. 290 * 291 * OTOH the cpuarrays can contain lots of objects, 292 * which could lock up otherwise freeable slabs. 
293 */ 294 #define REAPTIMEOUT_AC (2*HZ) 295 #define REAPTIMEOUT_NODE (4*HZ) 296 297 #if STATS 298 #define STATS_INC_ACTIVE(x) ((x)->num_active++) 299 #define STATS_DEC_ACTIVE(x) ((x)->num_active--) 300 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 301 #define STATS_INC_GROWN(x) ((x)->grown++) 302 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 303 #define STATS_SET_HIGH(x) \ 304 do { \ 305 if ((x)->num_active > (x)->high_mark) \ 306 (x)->high_mark = (x)->num_active; \ 307 } while (0) 308 #define STATS_INC_ERR(x) ((x)->errors++) 309 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 310 #define STATS_INC_NODEFREES(x) ((x)->node_frees++) 311 #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 312 #define STATS_SET_FREEABLE(x, i) \ 313 do { \ 314 if ((x)->max_freeable < i) \ 315 (x)->max_freeable = i; \ 316 } while (0) 317 #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 318 #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 319 #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 320 #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 321 #else 322 #define STATS_INC_ACTIVE(x) do { } while (0) 323 #define STATS_DEC_ACTIVE(x) do { } while (0) 324 #define STATS_INC_ALLOCED(x) do { } while (0) 325 #define STATS_INC_GROWN(x) do { } while (0) 326 #define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0) 327 #define STATS_SET_HIGH(x) do { } while (0) 328 #define STATS_INC_ERR(x) do { } while (0) 329 #define STATS_INC_NODEALLOCS(x) do { } while (0) 330 #define STATS_INC_NODEFREES(x) do { } while (0) 331 #define STATS_INC_ACOVERFLOW(x) do { } while (0) 332 #define STATS_SET_FREEABLE(x, i) do { } while (0) 333 #define STATS_INC_ALLOCHIT(x) do { } while (0) 334 #define STATS_INC_ALLOCMISS(x) do { } while (0) 335 #define STATS_INC_FREEHIT(x) do { } while (0) 336 #define STATS_INC_FREEMISS(x) do { } while (0) 337 #endif 338 339 #if DEBUG 340 341 /* 342 * memory layout of objects: 343 * 0 : objp 344 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 345 * the end of an object is aligned with the end of the real 346 * allocation. Catches writes behind the end of the allocation. 347 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 348 * redzone word. 349 * cachep->obj_offset: The real object. 
350 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 351 * cachep->size - 1* BYTES_PER_WORD: last caller address 352 * [BYTES_PER_WORD long] 353 */ 354 static int obj_offset(struct kmem_cache *cachep) 355 { 356 return cachep->obj_offset; 357 } 358 359 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 360 { 361 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 362 return (unsigned long long*) (objp + obj_offset(cachep) - 363 sizeof(unsigned long long)); 364 } 365 366 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 367 { 368 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 369 if (cachep->flags & SLAB_STORE_USER) 370 return (unsigned long long *)(objp + cachep->size - 371 sizeof(unsigned long long) - 372 REDZONE_ALIGN); 373 return (unsigned long long *) (objp + cachep->size - 374 sizeof(unsigned long long)); 375 } 376 377 static void **dbg_userword(struct kmem_cache *cachep, void *objp) 378 { 379 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 380 return (void **)(objp + cachep->size - BYTES_PER_WORD); 381 } 382 383 #else 384 385 #define obj_offset(x) 0 386 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 387 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 388 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 389 390 #endif 391 392 #define OBJECT_FREE (0) 393 #define OBJECT_ACTIVE (1) 394 395 #ifdef CONFIG_DEBUG_SLAB_LEAK 396 397 static void set_obj_status(struct page *page, int idx, int val) 398 { 399 int freelist_size; 400 char *status; 401 struct kmem_cache *cachep = page->slab_cache; 402 403 freelist_size = cachep->num * sizeof(freelist_idx_t); 404 status = (char *)page->freelist + freelist_size; 405 status[idx] = val; 406 } 407 408 static inline unsigned int get_obj_status(struct page *page, int idx) 409 { 410 int freelist_size; 411 char *status; 412 struct kmem_cache *cachep = page->slab_cache; 413 414 freelist_size = cachep->num * sizeof(freelist_idx_t); 415 status = (char *)page->freelist + freelist_size; 416 417 return status[idx]; 418 } 419 420 #else 421 static inline void set_obj_status(struct page *page, int idx, int val) {} 422 423 #endif 424 425 /* 426 * Do not go above this order unless 0 objects fit into the slab or 427 * overridden on the command line. 
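 *
 * For example, booting with "slab_max_order=2" (parsed by
 * slab_max_order_setup() below) permits slabs of up to four pages;
 * the value is clamped to the range [0, MAX_ORDER - 1].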
428 */ 429 #define SLAB_MAX_ORDER_HI 1 430 #define SLAB_MAX_ORDER_LO 0 431 static int slab_max_order = SLAB_MAX_ORDER_LO; 432 static bool slab_max_order_set __initdata; 433 434 static inline struct kmem_cache *virt_to_cache(const void *obj) 435 { 436 struct page *page = virt_to_head_page(obj); 437 return page->slab_cache; 438 } 439 440 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, 441 unsigned int idx) 442 { 443 return page->s_mem + cache->size * idx; 444 } 445 446 /* 447 * We want to avoid an expensive divide : (offset / cache->size) 448 * Using the fact that size is a constant for a particular cache, 449 * we can replace (offset / cache->size) by 450 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 451 */ 452 static inline unsigned int obj_to_index(const struct kmem_cache *cache, 453 const struct page *page, void *obj) 454 { 455 u32 offset = (obj - page->s_mem); 456 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 457 } 458 459 /* internal cache of cache description objs */ 460 static struct kmem_cache kmem_cache_boot = { 461 .batchcount = 1, 462 .limit = BOOT_CPUCACHE_ENTRIES, 463 .shared = 1, 464 .size = sizeof(struct kmem_cache), 465 .name = "kmem_cache", 466 }; 467 468 #define BAD_ALIEN_MAGIC 0x01020304ul 469 470 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); 471 472 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 473 { 474 return this_cpu_ptr(cachep->cpu_cache); 475 } 476 477 static size_t calculate_freelist_size(int nr_objs, size_t align) 478 { 479 size_t freelist_size; 480 481 freelist_size = nr_objs * sizeof(freelist_idx_t); 482 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) 483 freelist_size += nr_objs * sizeof(char); 484 485 if (align) 486 freelist_size = ALIGN(freelist_size, align); 487 488 return freelist_size; 489 } 490 491 static int calculate_nr_objs(size_t slab_size, size_t buffer_size, 492 size_t idx_size, size_t align) 493 { 494 int nr_objs; 495 size_t remained_size; 496 size_t freelist_size; 497 int extra_space = 0; 498 499 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) 500 extra_space = sizeof(char); 501 /* 502 * Ignore padding for the initial guess. The padding 503 * is at most @align-1 bytes, and @buffer_size is at 504 * least @align. In the worst case, this result will 505 * be one greater than the number of objects that fit 506 * into the memory allocation when taking the padding 507 * into account. 508 */ 509 nr_objs = slab_size / (buffer_size + idx_size + extra_space); 510 511 /* 512 * This calculated number will be either the right 513 * amount, or one greater than what we want. 514 */ 515 remained_size = slab_size - nr_objs * buffer_size; 516 freelist_size = calculate_freelist_size(nr_objs, align); 517 if (remained_size < freelist_size) 518 nr_objs--; 519 520 return nr_objs; 521 } 522 523 /* 524 * Calculate the number of objects and left-over bytes for a given buffer size. 525 */ 526 static void cache_estimate(unsigned long gfporder, size_t buffer_size, 527 size_t align, int flags, size_t *left_over, 528 unsigned int *num) 529 { 530 int nr_objs; 531 size_t mgmt_size; 532 size_t slab_size = PAGE_SIZE << gfporder; 533 534 /* 535 * The slab management structure can be either off the slab or 536 * on it. 
For the latter case, the memory allocated for a 537 * slab is used for: 538 * 539 * - One unsigned int for each object 540 * - Padding to respect alignment of @align 541 * - @buffer_size bytes for each object 542 * 543 * If the slab management structure is off the slab, then the 544 * alignment will already be calculated into the size. Because 545 * the slabs are all pages aligned, the objects will be at the 546 * correct alignment when allocated. 547 */ 548 if (flags & CFLGS_OFF_SLAB) { 549 mgmt_size = 0; 550 nr_objs = slab_size / buffer_size; 551 552 } else { 553 nr_objs = calculate_nr_objs(slab_size, buffer_size, 554 sizeof(freelist_idx_t), align); 555 mgmt_size = calculate_freelist_size(nr_objs, align); 556 } 557 *num = nr_objs; 558 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 559 } 560 561 #if DEBUG 562 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) 563 564 static void __slab_error(const char *function, struct kmem_cache *cachep, 565 char *msg) 566 { 567 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 568 function, cachep->name, msg); 569 dump_stack(); 570 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 571 } 572 #endif 573 574 /* 575 * By default on NUMA we use alien caches to stage the freeing of 576 * objects allocated from other nodes. This causes massive memory 577 * inefficiencies when using fake NUMA setup to split memory into a 578 * large number of small nodes, so it can be disabled on the command 579 * line 580 */ 581 582 static int use_alien_caches __read_mostly = 1; 583 static int __init noaliencache_setup(char *s) 584 { 585 use_alien_caches = 0; 586 return 1; 587 } 588 __setup("noaliencache", noaliencache_setup); 589 590 static int __init slab_max_order_setup(char *str) 591 { 592 get_option(&str, &slab_max_order); 593 slab_max_order = slab_max_order < 0 ? 0 : 594 min(slab_max_order, MAX_ORDER - 1); 595 slab_max_order_set = true; 596 597 return 1; 598 } 599 __setup("slab_max_order=", slab_max_order_setup); 600 601 #ifdef CONFIG_NUMA 602 /* 603 * Special reaping functions for NUMA systems called from cache_reap(). 604 * These take care of doing round robin flushing of alien caches (containing 605 * objects freed on different nodes from which they were allocated) and the 606 * flushing of remote pcps by calling drain_node_pages. 607 */ 608 static DEFINE_PER_CPU(unsigned long, slab_reap_node); 609 610 static void init_reap_node(int cpu) 611 { 612 int node; 613 614 node = next_node(cpu_to_mem(cpu), node_online_map); 615 if (node == MAX_NUMNODES) 616 node = first_node(node_online_map); 617 618 per_cpu(slab_reap_node, cpu) = node; 619 } 620 621 static void next_reap_node(void) 622 { 623 int node = __this_cpu_read(slab_reap_node); 624 625 node = next_node(node, node_online_map); 626 if (unlikely(node >= MAX_NUMNODES)) 627 node = first_node(node_online_map); 628 __this_cpu_write(slab_reap_node, node); 629 } 630 631 #else 632 #define init_reap_node(cpu) do { } while (0) 633 #define next_reap_node(void) do { } while (0) 634 #endif 635 636 /* 637 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 638 * via the workqueue/eventd. 639 * Add the CPU number into the expiration time to minimize the possibility of 640 * the CPUs getting into lockstep and contending for the global cache chain 641 * lock. 
642 */ 643 static void start_cpu_timer(int cpu) 644 { 645 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); 646 647 /* 648 * When this gets called from do_initcalls via cpucache_init(), 649 * init_workqueues() has already run, so keventd will be setup 650 * at that time. 651 */ 652 if (keventd_up() && reap_work->work.func == NULL) { 653 init_reap_node(cpu); 654 INIT_DEFERRABLE_WORK(reap_work, cache_reap); 655 schedule_delayed_work_on(cpu, reap_work, 656 __round_jiffies_relative(HZ, cpu)); 657 } 658 } 659 660 static void init_arraycache(struct array_cache *ac, int limit, int batch) 661 { 662 /* 663 * The array_cache structures contain pointers to free object. 664 * However, when such objects are allocated or transferred to another 665 * cache the pointers are not cleared and they could be counted as 666 * valid references during a kmemleak scan. Therefore, kmemleak must 667 * not scan such objects. 668 */ 669 kmemleak_no_scan(ac); 670 if (ac) { 671 ac->avail = 0; 672 ac->limit = limit; 673 ac->batchcount = batch; 674 ac->touched = 0; 675 } 676 } 677 678 static struct array_cache *alloc_arraycache(int node, int entries, 679 int batchcount, gfp_t gfp) 680 { 681 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); 682 struct array_cache *ac = NULL; 683 684 ac = kmalloc_node(memsize, gfp, node); 685 init_arraycache(ac, entries, batchcount); 686 return ac; 687 } 688 689 static inline bool is_slab_pfmemalloc(struct page *page) 690 { 691 return PageSlabPfmemalloc(page); 692 } 693 694 /* Clears pfmemalloc_active if no slabs have pfmalloc set */ 695 static void recheck_pfmemalloc_active(struct kmem_cache *cachep, 696 struct array_cache *ac) 697 { 698 struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); 699 struct page *page; 700 unsigned long flags; 701 702 if (!pfmemalloc_active) 703 return; 704 705 spin_lock_irqsave(&n->list_lock, flags); 706 list_for_each_entry(page, &n->slabs_full, lru) 707 if (is_slab_pfmemalloc(page)) 708 goto out; 709 710 list_for_each_entry(page, &n->slabs_partial, lru) 711 if (is_slab_pfmemalloc(page)) 712 goto out; 713 714 list_for_each_entry(page, &n->slabs_free, lru) 715 if (is_slab_pfmemalloc(page)) 716 goto out; 717 718 pfmemalloc_active = false; 719 out: 720 spin_unlock_irqrestore(&n->list_lock, flags); 721 } 722 723 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, 724 gfp_t flags, bool force_refill) 725 { 726 int i; 727 void *objp = ac->entry[--ac->avail]; 728 729 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ 730 if (unlikely(is_obj_pfmemalloc(objp))) { 731 struct kmem_cache_node *n; 732 733 if (gfp_pfmemalloc_allowed(flags)) { 734 clear_obj_pfmemalloc(&objp); 735 return objp; 736 } 737 738 /* The caller cannot use PFMEMALLOC objects, find another one */ 739 for (i = 0; i < ac->avail; i++) { 740 /* If a !PFMEMALLOC object is found, swap them */ 741 if (!is_obj_pfmemalloc(ac->entry[i])) { 742 objp = ac->entry[i]; 743 ac->entry[i] = ac->entry[ac->avail]; 744 ac->entry[ac->avail] = objp; 745 return objp; 746 } 747 } 748 749 /* 750 * If there are empty slabs on the slabs_free list and we are 751 * being forced to refill the cache, mark this one !pfmemalloc. 
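		 *
		 * (The pfmemalloc marking of an entry is just the low
		 * SLAB_OBJ_PFMEMALLOC bit or-ed into the object pointer by
		 * set_obj_pfmemalloc(), e.g. a pointer ending in ...70 is
		 * stored as ...71, which is why such entries must be
		 * cleared with clear_obj_pfmemalloc() before the object is
		 * handed out.)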
752 */ 753 n = get_node(cachep, numa_mem_id()); 754 if (!list_empty(&n->slabs_free) && force_refill) { 755 struct page *page = virt_to_head_page(objp); 756 ClearPageSlabPfmemalloc(page); 757 clear_obj_pfmemalloc(&objp); 758 recheck_pfmemalloc_active(cachep, ac); 759 return objp; 760 } 761 762 /* No !PFMEMALLOC objects available */ 763 ac->avail++; 764 objp = NULL; 765 } 766 767 return objp; 768 } 769 770 static inline void *ac_get_obj(struct kmem_cache *cachep, 771 struct array_cache *ac, gfp_t flags, bool force_refill) 772 { 773 void *objp; 774 775 if (unlikely(sk_memalloc_socks())) 776 objp = __ac_get_obj(cachep, ac, flags, force_refill); 777 else 778 objp = ac->entry[--ac->avail]; 779 780 return objp; 781 } 782 783 static noinline void *__ac_put_obj(struct kmem_cache *cachep, 784 struct array_cache *ac, void *objp) 785 { 786 if (unlikely(pfmemalloc_active)) { 787 /* Some pfmemalloc slabs exist, check if this is one */ 788 struct page *page = virt_to_head_page(objp); 789 if (PageSlabPfmemalloc(page)) 790 set_obj_pfmemalloc(&objp); 791 } 792 793 return objp; 794 } 795 796 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, 797 void *objp) 798 { 799 if (unlikely(sk_memalloc_socks())) 800 objp = __ac_put_obj(cachep, ac, objp); 801 802 ac->entry[ac->avail++] = objp; 803 } 804 805 /* 806 * Transfer objects in one arraycache to another. 807 * Locking must be handled by the caller. 808 * 809 * Return the number of entries transferred. 810 */ 811 static int transfer_objects(struct array_cache *to, 812 struct array_cache *from, unsigned int max) 813 { 814 /* Figure out how many entries to transfer */ 815 int nr = min3(from->avail, max, to->limit - to->avail); 816 817 if (!nr) 818 return 0; 819 820 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 821 sizeof(void *) *nr); 822 823 from->avail -= nr; 824 to->avail += nr; 825 return nr; 826 } 827 828 #ifndef CONFIG_NUMA 829 830 #define drain_alien_cache(cachep, alien) do { } while (0) 831 #define reap_alien(cachep, n) do { } while (0) 832 833 static inline struct alien_cache **alloc_alien_cache(int node, 834 int limit, gfp_t gfp) 835 { 836 return (struct alien_cache **)BAD_ALIEN_MAGIC; 837 } 838 839 static inline void free_alien_cache(struct alien_cache **ac_ptr) 840 { 841 } 842 843 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 844 { 845 return 0; 846 } 847 848 static inline void *alternate_node_alloc(struct kmem_cache *cachep, 849 gfp_t flags) 850 { 851 return NULL; 852 } 853 854 static inline void *____cache_alloc_node(struct kmem_cache *cachep, 855 gfp_t flags, int nodeid) 856 { 857 return NULL; 858 } 859 860 static inline gfp_t gfp_exact_node(gfp_t flags) 861 { 862 return flags; 863 } 864 865 #else /* CONFIG_NUMA */ 866 867 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 868 static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 869 870 static struct alien_cache *__alloc_alien_cache(int node, int entries, 871 int batch, gfp_t gfp) 872 { 873 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); 874 struct alien_cache *alc = NULL; 875 876 alc = kmalloc_node(memsize, gfp, node); 877 init_arraycache(&alc->ac, entries, batch); 878 spin_lock_init(&alc->lock); 879 return alc; 880 } 881 882 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) 883 { 884 struct alien_cache **alc_ptr; 885 size_t memsize = sizeof(void *) * nr_node_ids; 886 int i; 887 888 if (limit > 1) 889 limit = 12; 890 alc_ptr = kzalloc_node(memsize, 
gfp, node); 891 if (!alc_ptr) 892 return NULL; 893 894 for_each_node(i) { 895 if (i == node || !node_online(i)) 896 continue; 897 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); 898 if (!alc_ptr[i]) { 899 for (i--; i >= 0; i--) 900 kfree(alc_ptr[i]); 901 kfree(alc_ptr); 902 return NULL; 903 } 904 } 905 return alc_ptr; 906 } 907 908 static void free_alien_cache(struct alien_cache **alc_ptr) 909 { 910 int i; 911 912 if (!alc_ptr) 913 return; 914 for_each_node(i) 915 kfree(alc_ptr[i]); 916 kfree(alc_ptr); 917 } 918 919 static void __drain_alien_cache(struct kmem_cache *cachep, 920 struct array_cache *ac, int node, 921 struct list_head *list) 922 { 923 struct kmem_cache_node *n = get_node(cachep, node); 924 925 if (ac->avail) { 926 spin_lock(&n->list_lock); 927 /* 928 * Stuff objects into the remote nodes shared array first. 929 * That way we could avoid the overhead of putting the objects 930 * into the free lists and getting them back later. 931 */ 932 if (n->shared) 933 transfer_objects(n->shared, ac, ac->limit); 934 935 free_block(cachep, ac->entry, ac->avail, node, list); 936 ac->avail = 0; 937 spin_unlock(&n->list_lock); 938 } 939 } 940 941 /* 942 * Called from cache_reap() to regularly drain alien caches round robin. 943 */ 944 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) 945 { 946 int node = __this_cpu_read(slab_reap_node); 947 948 if (n->alien) { 949 struct alien_cache *alc = n->alien[node]; 950 struct array_cache *ac; 951 952 if (alc) { 953 ac = &alc->ac; 954 if (ac->avail && spin_trylock_irq(&alc->lock)) { 955 LIST_HEAD(list); 956 957 __drain_alien_cache(cachep, ac, node, &list); 958 spin_unlock_irq(&alc->lock); 959 slabs_destroy(cachep, &list); 960 } 961 } 962 } 963 } 964 965 static void drain_alien_cache(struct kmem_cache *cachep, 966 struct alien_cache **alien) 967 { 968 int i = 0; 969 struct alien_cache *alc; 970 struct array_cache *ac; 971 unsigned long flags; 972 973 for_each_online_node(i) { 974 alc = alien[i]; 975 if (alc) { 976 LIST_HEAD(list); 977 978 ac = &alc->ac; 979 spin_lock_irqsave(&alc->lock, flags); 980 __drain_alien_cache(cachep, ac, i, &list); 981 spin_unlock_irqrestore(&alc->lock, flags); 982 slabs_destroy(cachep, &list); 983 } 984 } 985 } 986 987 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, 988 int node, int page_node) 989 { 990 struct kmem_cache_node *n; 991 struct alien_cache *alien = NULL; 992 struct array_cache *ac; 993 LIST_HEAD(list); 994 995 n = get_node(cachep, node); 996 STATS_INC_NODEFREES(cachep); 997 if (n->alien && n->alien[page_node]) { 998 alien = n->alien[page_node]; 999 ac = &alien->ac; 1000 spin_lock(&alien->lock); 1001 if (unlikely(ac->avail == ac->limit)) { 1002 STATS_INC_ACOVERFLOW(cachep); 1003 __drain_alien_cache(cachep, ac, page_node, &list); 1004 } 1005 ac_put_obj(cachep, ac, objp); 1006 spin_unlock(&alien->lock); 1007 slabs_destroy(cachep, &list); 1008 } else { 1009 n = get_node(cachep, page_node); 1010 spin_lock(&n->list_lock); 1011 free_block(cachep, &objp, 1, page_node, &list); 1012 spin_unlock(&n->list_lock); 1013 slabs_destroy(cachep, &list); 1014 } 1015 return 1; 1016 } 1017 1018 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1019 { 1020 int page_node = page_to_nid(virt_to_page(objp)); 1021 int node = numa_mem_id(); 1022 /* 1023 * Make sure we are not freeing a object from another node to the array 1024 * cache on this cpu. 
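	 *
	 * For example, a CPU on node 0 freeing an object whose page came
	 * from node 1 does not put it into its own cpu array; instead
	 * __cache_free_alien() above either parks it in node 0's alien
	 * array slot for node 1 (draining that array back to node 1 first
	 * if it is full) or, when no alien array exists, hands it straight
	 * to node 1's free lists via free_block().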
1025 */ 1026 if (likely(node == page_node)) 1027 return 0; 1028 1029 return __cache_free_alien(cachep, objp, node, page_node); 1030 } 1031 1032 /* 1033 * Construct gfp mask to allocate from a specific node but do not invoke reclaim 1034 * or warn about failures. 1035 */ 1036 static inline gfp_t gfp_exact_node(gfp_t flags) 1037 { 1038 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT; 1039 } 1040 #endif 1041 1042 /* 1043 * Allocates and initializes node for a node on each slab cache, used for 1044 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node 1045 * will be allocated off-node since memory is not yet online for the new node. 1046 * When hotplugging memory or a cpu, existing node are not replaced if 1047 * already in use. 1048 * 1049 * Must hold slab_mutex. 1050 */ 1051 static int init_cache_node_node(int node) 1052 { 1053 struct kmem_cache *cachep; 1054 struct kmem_cache_node *n; 1055 const size_t memsize = sizeof(struct kmem_cache_node); 1056 1057 list_for_each_entry(cachep, &slab_caches, list) { 1058 /* 1059 * Set up the kmem_cache_node for cpu before we can 1060 * begin anything. Make sure some other cpu on this 1061 * node has not already allocated this 1062 */ 1063 n = get_node(cachep, node); 1064 if (!n) { 1065 n = kmalloc_node(memsize, GFP_KERNEL, node); 1066 if (!n) 1067 return -ENOMEM; 1068 kmem_cache_node_init(n); 1069 n->next_reap = jiffies + REAPTIMEOUT_NODE + 1070 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1071 1072 /* 1073 * The kmem_cache_nodes don't come and go as CPUs 1074 * come and go. slab_mutex is sufficient 1075 * protection here. 1076 */ 1077 cachep->node[node] = n; 1078 } 1079 1080 spin_lock_irq(&n->list_lock); 1081 n->free_limit = 1082 (1 + nr_cpus_node(node)) * 1083 cachep->batchcount + cachep->num; 1084 spin_unlock_irq(&n->list_lock); 1085 } 1086 return 0; 1087 } 1088 1089 static inline int slabs_tofree(struct kmem_cache *cachep, 1090 struct kmem_cache_node *n) 1091 { 1092 return (n->free_objects + cachep->num - 1) / cachep->num; 1093 } 1094 1095 static void cpuup_canceled(long cpu) 1096 { 1097 struct kmem_cache *cachep; 1098 struct kmem_cache_node *n = NULL; 1099 int node = cpu_to_mem(cpu); 1100 const struct cpumask *mask = cpumask_of_node(node); 1101 1102 list_for_each_entry(cachep, &slab_caches, list) { 1103 struct array_cache *nc; 1104 struct array_cache *shared; 1105 struct alien_cache **alien; 1106 LIST_HEAD(list); 1107 1108 n = get_node(cachep, node); 1109 if (!n) 1110 continue; 1111 1112 spin_lock_irq(&n->list_lock); 1113 1114 /* Free limit for this kmem_cache_node */ 1115 n->free_limit -= cachep->batchcount; 1116 1117 /* cpu is dead; no one can alloc from it. */ 1118 nc = per_cpu_ptr(cachep->cpu_cache, cpu); 1119 if (nc) { 1120 free_block(cachep, nc->entry, nc->avail, node, &list); 1121 nc->avail = 0; 1122 } 1123 1124 if (!cpumask_empty(mask)) { 1125 spin_unlock_irq(&n->list_lock); 1126 goto free_slab; 1127 } 1128 1129 shared = n->shared; 1130 if (shared) { 1131 free_block(cachep, shared->entry, 1132 shared->avail, node, &list); 1133 n->shared = NULL; 1134 } 1135 1136 alien = n->alien; 1137 n->alien = NULL; 1138 1139 spin_unlock_irq(&n->list_lock); 1140 1141 kfree(shared); 1142 if (alien) { 1143 drain_alien_cache(cachep, alien); 1144 free_alien_cache(alien); 1145 } 1146 1147 free_slab: 1148 slabs_destroy(cachep, &list); 1149 } 1150 /* 1151 * In the previous loop, all the objects were freed to 1152 * the respective cache's slabs, now we can go ahead and 1153 * shrink each nodelist to its limit. 
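	 *
	 * slabs_tofree() above simply rounds up: e.g. with cachep->num == 4
	 * objects per slab and n->free_objects == 10 it asks
	 * drain_freelist() for (10 + 4 - 1) / 4 == 3 slabs, enough to
	 * cover every currently free object.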
1154 */ 1155 list_for_each_entry(cachep, &slab_caches, list) { 1156 n = get_node(cachep, node); 1157 if (!n) 1158 continue; 1159 drain_freelist(cachep, n, slabs_tofree(cachep, n)); 1160 } 1161 } 1162 1163 static int cpuup_prepare(long cpu) 1164 { 1165 struct kmem_cache *cachep; 1166 struct kmem_cache_node *n = NULL; 1167 int node = cpu_to_mem(cpu); 1168 int err; 1169 1170 /* 1171 * We need to do this right in the beginning since 1172 * alloc_arraycache's are going to use this list. 1173 * kmalloc_node allows us to add the slab to the right 1174 * kmem_cache_node and not this cpu's kmem_cache_node 1175 */ 1176 err = init_cache_node_node(node); 1177 if (err < 0) 1178 goto bad; 1179 1180 /* 1181 * Now we can go ahead with allocating the shared arrays and 1182 * array caches 1183 */ 1184 list_for_each_entry(cachep, &slab_caches, list) { 1185 struct array_cache *shared = NULL; 1186 struct alien_cache **alien = NULL; 1187 1188 if (cachep->shared) { 1189 shared = alloc_arraycache(node, 1190 cachep->shared * cachep->batchcount, 1191 0xbaadf00d, GFP_KERNEL); 1192 if (!shared) 1193 goto bad; 1194 } 1195 if (use_alien_caches) { 1196 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL); 1197 if (!alien) { 1198 kfree(shared); 1199 goto bad; 1200 } 1201 } 1202 n = get_node(cachep, node); 1203 BUG_ON(!n); 1204 1205 spin_lock_irq(&n->list_lock); 1206 if (!n->shared) { 1207 /* 1208 * We are serialised from CPU_DEAD or 1209 * CPU_UP_CANCELLED by the cpucontrol lock 1210 */ 1211 n->shared = shared; 1212 shared = NULL; 1213 } 1214 #ifdef CONFIG_NUMA 1215 if (!n->alien) { 1216 n->alien = alien; 1217 alien = NULL; 1218 } 1219 #endif 1220 spin_unlock_irq(&n->list_lock); 1221 kfree(shared); 1222 free_alien_cache(alien); 1223 } 1224 1225 return 0; 1226 bad: 1227 cpuup_canceled(cpu); 1228 return -ENOMEM; 1229 } 1230 1231 static int cpuup_callback(struct notifier_block *nfb, 1232 unsigned long action, void *hcpu) 1233 { 1234 long cpu = (long)hcpu; 1235 int err = 0; 1236 1237 switch (action) { 1238 case CPU_UP_PREPARE: 1239 case CPU_UP_PREPARE_FROZEN: 1240 mutex_lock(&slab_mutex); 1241 err = cpuup_prepare(cpu); 1242 mutex_unlock(&slab_mutex); 1243 break; 1244 case CPU_ONLINE: 1245 case CPU_ONLINE_FROZEN: 1246 start_cpu_timer(cpu); 1247 break; 1248 #ifdef CONFIG_HOTPLUG_CPU 1249 case CPU_DOWN_PREPARE: 1250 case CPU_DOWN_PREPARE_FROZEN: 1251 /* 1252 * Shutdown cache reaper. Note that the slab_mutex is 1253 * held so that if cache_reap() is invoked it cannot do 1254 * anything expensive but will only modify reap_work 1255 * and reschedule the timer. 1256 */ 1257 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); 1258 /* Now the cache_reaper is guaranteed to be not running. */ 1259 per_cpu(slab_reap_work, cpu).work.func = NULL; 1260 break; 1261 case CPU_DOWN_FAILED: 1262 case CPU_DOWN_FAILED_FROZEN: 1263 start_cpu_timer(cpu); 1264 break; 1265 case CPU_DEAD: 1266 case CPU_DEAD_FROZEN: 1267 /* 1268 * Even if all the cpus of a node are down, we don't free the 1269 * kmem_cache_node of any cache. This to avoid a race between 1270 * cpu_down, and a kmalloc allocation from another cpu for 1271 * memory from the node of the cpu going down. The node 1272 * structure is usually allocated from kmem_cache_create() and 1273 * gets destroyed at kmem_cache_destroy(). 
1274 */ 1275 /* fall through */ 1276 #endif 1277 case CPU_UP_CANCELED: 1278 case CPU_UP_CANCELED_FROZEN: 1279 mutex_lock(&slab_mutex); 1280 cpuup_canceled(cpu); 1281 mutex_unlock(&slab_mutex); 1282 break; 1283 } 1284 return notifier_from_errno(err); 1285 } 1286 1287 static struct notifier_block cpucache_notifier = { 1288 &cpuup_callback, NULL, 0 1289 }; 1290 1291 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 1292 /* 1293 * Drains freelist for a node on each slab cache, used for memory hot-remove. 1294 * Returns -EBUSY if all objects cannot be drained so that the node is not 1295 * removed. 1296 * 1297 * Must hold slab_mutex. 1298 */ 1299 static int __meminit drain_cache_node_node(int node) 1300 { 1301 struct kmem_cache *cachep; 1302 int ret = 0; 1303 1304 list_for_each_entry(cachep, &slab_caches, list) { 1305 struct kmem_cache_node *n; 1306 1307 n = get_node(cachep, node); 1308 if (!n) 1309 continue; 1310 1311 drain_freelist(cachep, n, slabs_tofree(cachep, n)); 1312 1313 if (!list_empty(&n->slabs_full) || 1314 !list_empty(&n->slabs_partial)) { 1315 ret = -EBUSY; 1316 break; 1317 } 1318 } 1319 return ret; 1320 } 1321 1322 static int __meminit slab_memory_callback(struct notifier_block *self, 1323 unsigned long action, void *arg) 1324 { 1325 struct memory_notify *mnb = arg; 1326 int ret = 0; 1327 int nid; 1328 1329 nid = mnb->status_change_nid; 1330 if (nid < 0) 1331 goto out; 1332 1333 switch (action) { 1334 case MEM_GOING_ONLINE: 1335 mutex_lock(&slab_mutex); 1336 ret = init_cache_node_node(nid); 1337 mutex_unlock(&slab_mutex); 1338 break; 1339 case MEM_GOING_OFFLINE: 1340 mutex_lock(&slab_mutex); 1341 ret = drain_cache_node_node(nid); 1342 mutex_unlock(&slab_mutex); 1343 break; 1344 case MEM_ONLINE: 1345 case MEM_OFFLINE: 1346 case MEM_CANCEL_ONLINE: 1347 case MEM_CANCEL_OFFLINE: 1348 break; 1349 } 1350 out: 1351 return notifier_from_errno(ret); 1352 } 1353 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ 1354 1355 /* 1356 * swap the static kmem_cache_node with kmalloced memory 1357 */ 1358 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, 1359 int nodeid) 1360 { 1361 struct kmem_cache_node *ptr; 1362 1363 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); 1364 BUG_ON(!ptr); 1365 1366 memcpy(ptr, list, sizeof(struct kmem_cache_node)); 1367 /* 1368 * Do not assume that spinlocks can be initialized via memcpy: 1369 */ 1370 spin_lock_init(&ptr->list_lock); 1371 1372 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1373 cachep->node[nodeid] = ptr; 1374 } 1375 1376 /* 1377 * For setting up all the kmem_cache_node for cache whose buffer_size is same as 1378 * size of kmem_cache_node. 1379 */ 1380 static void __init set_up_node(struct kmem_cache *cachep, int index) 1381 { 1382 int node; 1383 1384 for_each_online_node(node) { 1385 cachep->node[node] = &init_kmem_cache_node[index + node]; 1386 cachep->node[node]->next_reap = jiffies + 1387 REAPTIMEOUT_NODE + 1388 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 1389 } 1390 } 1391 1392 /* 1393 * Initialisation. Called after the page allocator have been initialised and 1394 * before smp_init(). 
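 *
 * The static init_kmem_cache_node[] array declared above provides two
 * banks of MAX_NUMNODES entries (NUM_INIT_LISTS == 2 * MAX_NUMNODES):
 * with MAX_NUMNODES == 64, for instance, node 1's bootstrap
 * kmem_cache_node for kmem_cache itself sits at index CACHE_CACHE + 1 == 1
 * and the one for the kmalloc-node cache at SIZE_NODE + 1 == 65, until
 * init_list() below swaps both for kmalloc'ed copies.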
1395 */ 1396 void __init kmem_cache_init(void) 1397 { 1398 int i; 1399 1400 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < 1401 sizeof(struct rcu_head)); 1402 kmem_cache = &kmem_cache_boot; 1403 1404 if (num_possible_nodes() == 1) 1405 use_alien_caches = 0; 1406 1407 for (i = 0; i < NUM_INIT_LISTS; i++) 1408 kmem_cache_node_init(&init_kmem_cache_node[i]); 1409 1410 /* 1411 * Fragmentation resistance on low memory - only use bigger 1412 * page orders on machines with more than 32MB of memory if 1413 * not overridden on the command line. 1414 */ 1415 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) 1416 slab_max_order = SLAB_MAX_ORDER_HI; 1417 1418 /* Bootstrap is tricky, because several objects are allocated 1419 * from caches that do not exist yet: 1420 * 1) initialize the kmem_cache cache: it contains the struct 1421 * kmem_cache structures of all caches, except kmem_cache itself: 1422 * kmem_cache is statically allocated. 1423 * Initially an __init data area is used for the head array and the 1424 * kmem_cache_node structures, it's replaced with a kmalloc allocated 1425 * array at the end of the bootstrap. 1426 * 2) Create the first kmalloc cache. 1427 * The struct kmem_cache for the new cache is allocated normally. 1428 * An __init data area is used for the head array. 1429 * 3) Create the remaining kmalloc caches, with minimally sized 1430 * head arrays. 1431 * 4) Replace the __init data head arrays for kmem_cache and the first 1432 * kmalloc cache with kmalloc allocated arrays. 1433 * 5) Replace the __init data for kmem_cache_node for kmem_cache and 1434 * the other cache's with kmalloc allocated memory. 1435 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1436 */ 1437 1438 /* 1) create the kmem_cache */ 1439 1440 /* 1441 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids 1442 */ 1443 create_boot_cache(kmem_cache, "kmem_cache", 1444 offsetof(struct kmem_cache, node) + 1445 nr_node_ids * sizeof(struct kmem_cache_node *), 1446 SLAB_HWCACHE_ALIGN); 1447 list_add(&kmem_cache->list, &slab_caches); 1448 slab_state = PARTIAL; 1449 1450 /* 1451 * Initialize the caches that provide memory for the kmem_cache_node 1452 * structures first. Without this, further allocations will bug. 1453 */ 1454 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node", 1455 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); 1456 slab_state = PARTIAL_NODE; 1457 setup_kmalloc_cache_index_table(); 1458 1459 slab_early_init = 0; 1460 1461 /* 5) Replace the bootstrap kmem_cache_node */ 1462 { 1463 int nid; 1464 1465 for_each_online_node(nid) { 1466 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); 1467 1468 init_list(kmalloc_caches[INDEX_NODE], 1469 &init_kmem_cache_node[SIZE_NODE + nid], nid); 1470 } 1471 } 1472 1473 create_kmalloc_caches(ARCH_KMALLOC_FLAGS); 1474 } 1475 1476 void __init kmem_cache_init_late(void) 1477 { 1478 struct kmem_cache *cachep; 1479 1480 slab_state = UP; 1481 1482 /* 6) resize the head arrays to their final sizes */ 1483 mutex_lock(&slab_mutex); 1484 list_for_each_entry(cachep, &slab_caches, list) 1485 if (enable_cpucache(cachep, GFP_NOWAIT)) 1486 BUG(); 1487 mutex_unlock(&slab_mutex); 1488 1489 /* Done! 
*/ 1490 slab_state = FULL; 1491 1492 /* 1493 * Register a cpu startup notifier callback that initializes 1494 * cpu_cache_get for all new cpus 1495 */ 1496 register_cpu_notifier(&cpucache_notifier); 1497 1498 #ifdef CONFIG_NUMA 1499 /* 1500 * Register a memory hotplug callback that initializes and frees 1501 * node. 1502 */ 1503 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 1504 #endif 1505 1506 /* 1507 * The reap timers are started later, with a module init call: That part 1508 * of the kernel is not yet operational. 1509 */ 1510 } 1511 1512 static int __init cpucache_init(void) 1513 { 1514 int cpu; 1515 1516 /* 1517 * Register the timers that return unneeded pages to the page allocator 1518 */ 1519 for_each_online_cpu(cpu) 1520 start_cpu_timer(cpu); 1521 1522 /* Done! */ 1523 slab_state = FULL; 1524 return 0; 1525 } 1526 __initcall(cpucache_init); 1527 1528 static noinline void 1529 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) 1530 { 1531 #if DEBUG 1532 struct kmem_cache_node *n; 1533 struct page *page; 1534 unsigned long flags; 1535 int node; 1536 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 1537 DEFAULT_RATELIMIT_BURST); 1538 1539 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) 1540 return; 1541 1542 printk(KERN_WARNING 1543 "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n", 1544 nodeid, gfpflags); 1545 printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", 1546 cachep->name, cachep->size, cachep->gfporder); 1547 1548 for_each_kmem_cache_node(cachep, node, n) { 1549 unsigned long active_objs = 0, num_objs = 0, free_objects = 0; 1550 unsigned long active_slabs = 0, num_slabs = 0; 1551 1552 spin_lock_irqsave(&n->list_lock, flags); 1553 list_for_each_entry(page, &n->slabs_full, lru) { 1554 active_objs += cachep->num; 1555 active_slabs++; 1556 } 1557 list_for_each_entry(page, &n->slabs_partial, lru) { 1558 active_objs += page->active; 1559 active_slabs++; 1560 } 1561 list_for_each_entry(page, &n->slabs_free, lru) 1562 num_slabs++; 1563 1564 free_objects += n->free_objects; 1565 spin_unlock_irqrestore(&n->list_lock, flags); 1566 1567 num_slabs += active_slabs; 1568 num_objs = num_slabs * cachep->num; 1569 printk(KERN_WARNING 1570 " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n", 1571 node, active_slabs, num_slabs, active_objs, num_objs, 1572 free_objects); 1573 } 1574 #endif 1575 } 1576 1577 /* 1578 * Interface to system's page allocator. No need to hold the 1579 * kmem_cache_node ->list_lock. 1580 * 1581 * If we requested dmaable memory, we will get it. Even if we 1582 * did not request dmaable memory, we might get it, but that 1583 * would be relatively rare and ignorable. 
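 *
 * Each successful call accounts 1 << cachep->gfporder pages (e.g. two
 * pages for gfporder == 1) against NR_SLAB_RECLAIMABLE or
 * NR_SLAB_UNRECLAIMABLE, mirrored by the subtraction in kmem_freepages().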
1584 */ 1585 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, 1586 int nodeid) 1587 { 1588 struct page *page; 1589 int nr_pages; 1590 1591 flags |= cachep->allocflags; 1592 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1593 flags |= __GFP_RECLAIMABLE; 1594 1595 if (memcg_charge_slab(cachep, flags, cachep->gfporder)) 1596 return NULL; 1597 1598 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); 1599 if (!page) { 1600 memcg_uncharge_slab(cachep, cachep->gfporder); 1601 slab_out_of_memory(cachep, flags, nodeid); 1602 return NULL; 1603 } 1604 1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1606 if (page_is_pfmemalloc(page)) 1607 pfmemalloc_active = true; 1608 1609 nr_pages = (1 << cachep->gfporder); 1610 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1611 add_zone_page_state(page_zone(page), 1612 NR_SLAB_RECLAIMABLE, nr_pages); 1613 else 1614 add_zone_page_state(page_zone(page), 1615 NR_SLAB_UNRECLAIMABLE, nr_pages); 1616 __SetPageSlab(page); 1617 if (page_is_pfmemalloc(page)) 1618 SetPageSlabPfmemalloc(page); 1619 1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1621 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); 1622 1623 if (cachep->ctor) 1624 kmemcheck_mark_uninitialized_pages(page, nr_pages); 1625 else 1626 kmemcheck_mark_unallocated_pages(page, nr_pages); 1627 } 1628 1629 return page; 1630 } 1631 1632 /* 1633 * Interface to system's page release. 1634 */ 1635 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) 1636 { 1637 const unsigned long nr_freed = (1 << cachep->gfporder); 1638 1639 kmemcheck_free_shadow(page, cachep->gfporder); 1640 1641 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1642 sub_zone_page_state(page_zone(page), 1643 NR_SLAB_RECLAIMABLE, nr_freed); 1644 else 1645 sub_zone_page_state(page_zone(page), 1646 NR_SLAB_UNRECLAIMABLE, nr_freed); 1647 1648 BUG_ON(!PageSlab(page)); 1649 __ClearPageSlabPfmemalloc(page); 1650 __ClearPageSlab(page); 1651 page_mapcount_reset(page); 1652 page->mapping = NULL; 1653 1654 if (current->reclaim_state) 1655 current->reclaim_state->reclaimed_slab += nr_freed; 1656 __free_pages(page, cachep->gfporder); 1657 memcg_uncharge_slab(cachep, cachep->gfporder); 1658 } 1659 1660 static void kmem_rcu_free(struct rcu_head *head) 1661 { 1662 struct kmem_cache *cachep; 1663 struct page *page; 1664 1665 page = container_of(head, struct page, rcu_head); 1666 cachep = page->slab_cache; 1667 1668 kmem_freepages(cachep, page); 1669 } 1670 1671 #if DEBUG 1672 1673 #ifdef CONFIG_DEBUG_PAGEALLOC 1674 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1675 unsigned long caller) 1676 { 1677 int size = cachep->object_size; 1678 1679 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1680 1681 if (size < 5 * sizeof(unsigned long)) 1682 return; 1683 1684 *addr++ = 0x12345678; 1685 *addr++ = caller; 1686 *addr++ = smp_processor_id(); 1687 size -= 3 * sizeof(unsigned long); 1688 { 1689 unsigned long *sptr = &caller; 1690 unsigned long svalue; 1691 1692 while (!kstack_end(sptr)) { 1693 svalue = *sptr++; 1694 if (kernel_text_address(svalue)) { 1695 *addr++ = svalue; 1696 size -= sizeof(unsigned long); 1697 if (size <= sizeof(unsigned long)) 1698 break; 1699 } 1700 } 1701 1702 } 1703 *addr++ = 0x87654321; 1704 } 1705 #endif 1706 1707 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1708 { 1709 int size = cachep->object_size; 1710 addr = &((char *)addr)[obj_offset(cachep)]; 1711 1712 
memset(addr, val, size); 1713 *(unsigned char *)(addr + size - 1) = POISON_END; 1714 } 1715 1716 static void dump_line(char *data, int offset, int limit) 1717 { 1718 int i; 1719 unsigned char error = 0; 1720 int bad_count = 0; 1721 1722 printk(KERN_ERR "%03x: ", offset); 1723 for (i = 0; i < limit; i++) { 1724 if (data[offset + i] != POISON_FREE) { 1725 error = data[offset + i]; 1726 bad_count++; 1727 } 1728 } 1729 print_hex_dump(KERN_CONT, "", 0, 16, 1, 1730 &data[offset], limit, 1); 1731 1732 if (bad_count == 1) { 1733 error ^= POISON_FREE; 1734 if (!(error & (error - 1))) { 1735 printk(KERN_ERR "Single bit error detected. Probably " 1736 "bad RAM.\n"); 1737 #ifdef CONFIG_X86 1738 printk(KERN_ERR "Run memtest86+ or a similar memory " 1739 "test tool.\n"); 1740 #else 1741 printk(KERN_ERR "Run a memory test tool.\n"); 1742 #endif 1743 } 1744 } 1745 } 1746 #endif 1747 1748 #if DEBUG 1749 1750 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1751 { 1752 int i, size; 1753 char *realobj; 1754 1755 if (cachep->flags & SLAB_RED_ZONE) { 1756 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", 1757 *dbg_redzone1(cachep, objp), 1758 *dbg_redzone2(cachep, objp)); 1759 } 1760 1761 if (cachep->flags & SLAB_STORE_USER) { 1762 printk(KERN_ERR "Last user: [<%p>](%pSR)\n", 1763 *dbg_userword(cachep, objp), 1764 *dbg_userword(cachep, objp)); 1765 } 1766 realobj = (char *)objp + obj_offset(cachep); 1767 size = cachep->object_size; 1768 for (i = 0; i < size && lines; i += 16, lines--) { 1769 int limit; 1770 limit = 16; 1771 if (i + limit > size) 1772 limit = size - i; 1773 dump_line(realobj, i, limit); 1774 } 1775 } 1776 1777 static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1778 { 1779 char *realobj; 1780 int size, i; 1781 int lines = 0; 1782 1783 realobj = (char *)objp + obj_offset(cachep); 1784 size = cachep->object_size; 1785 1786 for (i = 0; i < size; i++) { 1787 char exp = POISON_FREE; 1788 if (i == size - 1) 1789 exp = POISON_END; 1790 if (realobj[i] != exp) { 1791 int limit; 1792 /* Mismatch ! 
 */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		/*
		 * RCU free overloads the RCU head over the LRU.
		 * slab_page has been overloaded over the LRU,
		 * however it is not used from now on so that
		 * we can use it safely.
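		 *
		 * Note that SLAB_DESTROY_BY_RCU only defers freeing of the
		 * backing page via call_rcu() below; objects may still be
		 * recycled within the cache right away, so lockless readers
		 * have to re-validate an object after taking their reference
		 * (see the SLAB_DESTROY_BY_RCU notes in include/linux/slab.h).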
1899 */ 1900 head = (void *)&page->rcu_head; 1901 call_rcu(head, kmem_rcu_free); 1902 1903 } else { 1904 kmem_freepages(cachep, page); 1905 } 1906 1907 /* 1908 * From now on, we don't use freelist 1909 * although actual page can be freed in rcu context 1910 */ 1911 if (OFF_SLAB(cachep)) 1912 kmem_cache_free(cachep->freelist_cache, freelist); 1913 } 1914 1915 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) 1916 { 1917 struct page *page, *n; 1918 1919 list_for_each_entry_safe(page, n, list, lru) { 1920 list_del(&page->lru); 1921 slab_destroy(cachep, page); 1922 } 1923 } 1924 1925 /** 1926 * calculate_slab_order - calculate size (page order) of slabs 1927 * @cachep: pointer to the cache that is being created 1928 * @size: size of objects to be created in this cache. 1929 * @align: required alignment for the objects. 1930 * @flags: slab allocation flags 1931 * 1932 * Also calculates the number of objects per slab. 1933 * 1934 * This could be made much more intelligent. For now, try to avoid using 1935 * high order pages for slabs. When the gfp() functions are more friendly 1936 * towards high-order requests, this should be changed. 1937 */ 1938 static size_t calculate_slab_order(struct kmem_cache *cachep, 1939 size_t size, size_t align, unsigned long flags) 1940 { 1941 unsigned long offslab_limit; 1942 size_t left_over = 0; 1943 int gfporder; 1944 1945 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1946 unsigned int num; 1947 size_t remainder; 1948 1949 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1950 if (!num) 1951 continue; 1952 1953 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ 1954 if (num > SLAB_OBJ_MAX_NUM) 1955 break; 1956 1957 if (flags & CFLGS_OFF_SLAB) { 1958 size_t freelist_size_per_obj = sizeof(freelist_idx_t); 1959 /* 1960 * Max number of objs-per-slab for caches which 1961 * use off-slab slabs. Needed to avoid a possible 1962 * looping condition in cache_grow(). 1963 */ 1964 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) 1965 freelist_size_per_obj += sizeof(char); 1966 offslab_limit = size; 1967 offslab_limit /= freelist_size_per_obj; 1968 1969 if (num > offslab_limit) 1970 break; 1971 } 1972 1973 /* Found something acceptable - save it away */ 1974 cachep->num = num; 1975 cachep->gfporder = gfporder; 1976 left_over = remainder; 1977 1978 /* 1979 * A VFS-reclaimable slab tends to have most allocations 1980 * as GFP_NOFS and we really don't want to have to be allocating 1981 * higher-order pages when we are unable to shrink dcache. 1982 */ 1983 if (flags & SLAB_RECLAIM_ACCOUNT) 1984 break; 1985 1986 /* 1987 * Large number of objects is good, but very large slabs are 1988 * currently bad for the gfp()s. 1989 */ 1990 if (gfporder >= slab_max_order) 1991 break; 1992 1993 /* 1994 * Acceptable internal fragmentation? 
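		 *
		 * The check below accepts this order once at most 1/8 of
		 * the slab is wasted: e.g. for a single 4096-byte page a
		 * left_over of up to 512 bytes (512 * 8 == 4096) still
		 * passes.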
1995 */ 1996 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 1997 break; 1998 } 1999 return left_over; 2000 } 2001 2002 static struct array_cache __percpu *alloc_kmem_cache_cpus( 2003 struct kmem_cache *cachep, int entries, int batchcount) 2004 { 2005 int cpu; 2006 size_t size; 2007 struct array_cache __percpu *cpu_cache; 2008 2009 size = sizeof(void *) * entries + sizeof(struct array_cache); 2010 cpu_cache = __alloc_percpu(size, sizeof(void *)); 2011 2012 if (!cpu_cache) 2013 return NULL; 2014 2015 for_each_possible_cpu(cpu) { 2016 init_arraycache(per_cpu_ptr(cpu_cache, cpu), 2017 entries, batchcount); 2018 } 2019 2020 return cpu_cache; 2021 } 2022 2023 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) 2024 { 2025 if (slab_state >= FULL) 2026 return enable_cpucache(cachep, gfp); 2027 2028 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); 2029 if (!cachep->cpu_cache) 2030 return 1; 2031 2032 if (slab_state == DOWN) { 2033 /* Creation of first cache (kmem_cache). */ 2034 set_up_node(kmem_cache, CACHE_CACHE); 2035 } else if (slab_state == PARTIAL) { 2036 /* For kmem_cache_node */ 2037 set_up_node(cachep, SIZE_NODE); 2038 } else { 2039 int node; 2040 2041 for_each_online_node(node) { 2042 cachep->node[node] = kmalloc_node( 2043 sizeof(struct kmem_cache_node), gfp, node); 2044 BUG_ON(!cachep->node[node]); 2045 kmem_cache_node_init(cachep->node[node]); 2046 } 2047 } 2048 2049 cachep->node[numa_mem_id()]->next_reap = 2050 jiffies + REAPTIMEOUT_NODE + 2051 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 2052 2053 cpu_cache_get(cachep)->avail = 0; 2054 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2055 cpu_cache_get(cachep)->batchcount = 1; 2056 cpu_cache_get(cachep)->touched = 0; 2057 cachep->batchcount = 1; 2058 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2059 return 0; 2060 } 2061 2062 unsigned long kmem_cache_flags(unsigned long object_size, 2063 unsigned long flags, const char *name, 2064 void (*ctor)(void *)) 2065 { 2066 return flags; 2067 } 2068 2069 struct kmem_cache * 2070 __kmem_cache_alias(const char *name, size_t size, size_t align, 2071 unsigned long flags, void (*ctor)(void *)) 2072 { 2073 struct kmem_cache *cachep; 2074 2075 cachep = find_mergeable(size, align, flags, name, ctor); 2076 if (cachep) { 2077 cachep->refcount++; 2078 2079 /* 2080 * Adjust the object sizes so that we clear 2081 * the complete object on kzalloc. 2082 */ 2083 cachep->object_size = max_t(int, cachep->object_size, size); 2084 } 2085 return cachep; 2086 } 2087 2088 /** 2089 * __kmem_cache_create - Create a cache. 2090 * @cachep: cache management descriptor 2091 * @flags: SLAB flags 2092 * 2093 * Returns a ptr to the cache on success, NULL on failure. 2094 * Cannot be called within a int, but can be interrupted. 2095 * The @ctor is run when new pages are allocated by the cache. 2096 * 2097 * The flags are 2098 * 2099 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2100 * to catch references to uninitialised memory. 2101 * 2102 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2103 * for buffer overruns. 2104 * 2105 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2106 * cacheline. This can be beneficial if you're counting cycles as closely 2107 * as davem. 
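 *
 * Example (an illustrative sketch only, not taken from real callers;
 * most users go through the public kmem_cache_create() wrapper, and
 * the struct and cache names below are hypothetical):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;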
2108 */ 2109 int 2110 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) 2111 { 2112 size_t left_over, freelist_size; 2113 size_t ralign = BYTES_PER_WORD; 2114 gfp_t gfp; 2115 int err; 2116 size_t size = cachep->size; 2117 2118 #if DEBUG 2119 #if FORCED_DEBUG 2120 /* 2121 * Enable redzoning and last user accounting, except for caches with 2122 * large objects, if the increased size would increase the object size 2123 * above the next power of two: caches with object sizes just above a 2124 * power of two have a significant amount of internal fragmentation. 2125 */ 2126 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2127 2 * sizeof(unsigned long long))) 2128 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2129 if (!(flags & SLAB_DESTROY_BY_RCU)) 2130 flags |= SLAB_POISON; 2131 #endif 2132 if (flags & SLAB_DESTROY_BY_RCU) 2133 BUG_ON(flags & SLAB_POISON); 2134 #endif 2135 2136 /* 2137 * Check that size is in terms of words. This is needed to avoid 2138 * unaligned accesses for some archs when redzoning is used, and makes 2139 * sure any on-slab bufctl's are also correctly aligned. 2140 */ 2141 if (size & (BYTES_PER_WORD - 1)) { 2142 size += (BYTES_PER_WORD - 1); 2143 size &= ~(BYTES_PER_WORD - 1); 2144 } 2145 2146 if (flags & SLAB_RED_ZONE) { 2147 ralign = REDZONE_ALIGN; 2148 /* If redzoning, ensure that the second redzone is suitably 2149 * aligned, by adjusting the object size accordingly. */ 2150 size += REDZONE_ALIGN - 1; 2151 size &= ~(REDZONE_ALIGN - 1); 2152 } 2153 2154 /* 3) caller mandated alignment */ 2155 if (ralign < cachep->align) { 2156 ralign = cachep->align; 2157 } 2158 /* disable debug if necessary */ 2159 if (ralign > __alignof__(unsigned long long)) 2160 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2161 /* 2162 * 4) Store it. 2163 */ 2164 cachep->align = ralign; 2165 2166 if (slab_is_available()) 2167 gfp = GFP_KERNEL; 2168 else 2169 gfp = GFP_NOWAIT; 2170 2171 #if DEBUG 2172 2173 /* 2174 * Both debugging options require word-alignment which is calculated 2175 * into align above. 2176 */ 2177 if (flags & SLAB_RED_ZONE) { 2178 /* add space for red zone words */ 2179 cachep->obj_offset += sizeof(unsigned long long); 2180 size += 2 * sizeof(unsigned long long); 2181 } 2182 if (flags & SLAB_STORE_USER) { 2183 /* user store requires one word storage behind the end of 2184 * the real object. But if the second red zone needs to be 2185 * aligned to 64 bits, we must allow that much space. 2186 */ 2187 if (flags & SLAB_RED_ZONE) 2188 size += REDZONE_ALIGN; 2189 else 2190 size += BYTES_PER_WORD; 2191 } 2192 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2193 if (size >= kmalloc_size(INDEX_NODE + 1) 2194 && cachep->object_size > cache_line_size() 2195 && ALIGN(size, cachep->align) < PAGE_SIZE) { 2196 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2197 size = PAGE_SIZE; 2198 } 2199 #endif 2200 #endif 2201 2202 /* 2203 * Determine if the slab management is 'on' or 'off' slab. 2204 * (bootstrapping cannot cope with offslab caches so don't do 2205 * it too early on. Always use on-slab management when 2206 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak) 2207 */ 2208 if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init && 2209 !(flags & SLAB_NOLEAKTRACE)) 2210 /* 2211 * Size is large, assume best to place the slab management obj 2212 * off-slab (should allow better packing of objs). 
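 *
 * (For example, assuming hypothetical 4096-byte pages, PAGE_SIZE >> 5
 * is 128, so objects of 128 bytes and up are marked for an off-slab
 * freelist here - unless we are still in early boot or the cache is
 * SLAB_NOLEAKTRACE. The decision may be reverted further down if
 * enough leftover space allows moving the freelist back on-slab.)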
2213 */ 2214 flags |= CFLGS_OFF_SLAB; 2215 2216 size = ALIGN(size, cachep->align); 2217 /* 2218 * We should restrict the number of objects in a slab to implement 2219 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. 2220 */ 2221 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) 2222 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); 2223 2224 left_over = calculate_slab_order(cachep, size, cachep->align, flags); 2225 2226 if (!cachep->num) 2227 return -E2BIG; 2228 2229 freelist_size = calculate_freelist_size(cachep->num, cachep->align); 2230 2231 /* 2232 * If the slab has been placed off-slab, and we have enough space then 2233 * move it on-slab. This is at the expense of any extra colouring. 2234 */ 2235 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { 2236 flags &= ~CFLGS_OFF_SLAB; 2237 left_over -= freelist_size; 2238 } 2239 2240 if (flags & CFLGS_OFF_SLAB) { 2241 /* really off slab. No need for manual alignment */ 2242 freelist_size = calculate_freelist_size(cachep->num, 0); 2243 2244 #ifdef CONFIG_PAGE_POISONING 2245 /* If we're going to use the generic kernel_map_pages() 2246 * poisoning, then it's going to smash the contents of 2247 * the redzone and userword anyhow, so switch them off. 2248 */ 2249 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON) 2250 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2251 #endif 2252 } 2253 2254 cachep->colour_off = cache_line_size(); 2255 /* Offset must be a multiple of the alignment. */ 2256 if (cachep->colour_off < cachep->align) 2257 cachep->colour_off = cachep->align; 2258 cachep->colour = left_over / cachep->colour_off; 2259 cachep->freelist_size = freelist_size; 2260 cachep->flags = flags; 2261 cachep->allocflags = __GFP_COMP; 2262 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2263 cachep->allocflags |= GFP_DMA; 2264 cachep->size = size; 2265 cachep->reciprocal_buffer_size = reciprocal_value(size); 2266 2267 if (flags & CFLGS_OFF_SLAB) { 2268 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); 2269 /* 2270 * This is a possibility for one of the kmalloc_{dma,}_caches. 2271 * But since we go off slab only for object size greater than 2272 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created 2273 * in ascending order,this should not happen at all. 2274 * But leave a BUG_ON for some lucky dude. 
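 *
 * (As an illustration with hypothetical numbers: a cache of 512-byte
 * objects packing 8 objects per slab needs only an 8-entry
 * freelist_idx_t array, so its off-slab freelist is served by one of
 * the small kmalloc caches set up earlier during boot - never by the
 * cache that is being created here.)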
2275 */ 2276 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); 2277 } 2278 2279 err = setup_cpu_cache(cachep, gfp); 2280 if (err) { 2281 __kmem_cache_shutdown(cachep); 2282 return err; 2283 } 2284 2285 return 0; 2286 } 2287 2288 #if DEBUG 2289 static void check_irq_off(void) 2290 { 2291 BUG_ON(!irqs_disabled()); 2292 } 2293 2294 static void check_irq_on(void) 2295 { 2296 BUG_ON(irqs_disabled()); 2297 } 2298 2299 static void check_spinlock_acquired(struct kmem_cache *cachep) 2300 { 2301 #ifdef CONFIG_SMP 2302 check_irq_off(); 2303 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); 2304 #endif 2305 } 2306 2307 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2308 { 2309 #ifdef CONFIG_SMP 2310 check_irq_off(); 2311 assert_spin_locked(&get_node(cachep, node)->list_lock); 2312 #endif 2313 } 2314 2315 #else 2316 #define check_irq_off() do { } while(0) 2317 #define check_irq_on() do { } while(0) 2318 #define check_spinlock_acquired(x) do { } while(0) 2319 #define check_spinlock_acquired_node(x, y) do { } while(0) 2320 #endif 2321 2322 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 2323 struct array_cache *ac, 2324 int force, int node); 2325 2326 static void do_drain(void *arg) 2327 { 2328 struct kmem_cache *cachep = arg; 2329 struct array_cache *ac; 2330 int node = numa_mem_id(); 2331 struct kmem_cache_node *n; 2332 LIST_HEAD(list); 2333 2334 check_irq_off(); 2335 ac = cpu_cache_get(cachep); 2336 n = get_node(cachep, node); 2337 spin_lock(&n->list_lock); 2338 free_block(cachep, ac->entry, ac->avail, node, &list); 2339 spin_unlock(&n->list_lock); 2340 slabs_destroy(cachep, &list); 2341 ac->avail = 0; 2342 } 2343 2344 static void drain_cpu_caches(struct kmem_cache *cachep) 2345 { 2346 struct kmem_cache_node *n; 2347 int node; 2348 2349 on_each_cpu(do_drain, cachep, 1); 2350 check_irq_on(); 2351 for_each_kmem_cache_node(cachep, node, n) 2352 if (n->alien) 2353 drain_alien_cache(cachep, n->alien); 2354 2355 for_each_kmem_cache_node(cachep, node, n) 2356 drain_array(cachep, n, n->shared, 1, node); 2357 } 2358 2359 /* 2360 * Remove slabs from the list of free slabs. 2361 * Specify the number of slabs to drain in tofree. 2362 * 2363 * Returns the actual number of slabs released. 2364 */ 2365 static int drain_freelist(struct kmem_cache *cache, 2366 struct kmem_cache_node *n, int tofree) 2367 { 2368 struct list_head *p; 2369 int nr_freed; 2370 struct page *page; 2371 2372 nr_freed = 0; 2373 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2374 2375 spin_lock_irq(&n->list_lock); 2376 p = n->slabs_free.prev; 2377 if (p == &n->slabs_free) { 2378 spin_unlock_irq(&n->list_lock); 2379 goto out; 2380 } 2381 2382 page = list_entry(p, struct page, lru); 2383 #if DEBUG 2384 BUG_ON(page->active); 2385 #endif 2386 list_del(&page->lru); 2387 /* 2388 * Safe to drop the lock. The slab is no longer linked 2389 * to the cache. 2390 */ 2391 n->free_objects -= cache->num; 2392 spin_unlock_irq(&n->list_lock); 2393 slab_destroy(cache, page); 2394 nr_freed++; 2395 } 2396 out: 2397 return nr_freed; 2398 } 2399 2400 int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) 2401 { 2402 int ret = 0; 2403 int node; 2404 struct kmem_cache_node *n; 2405 2406 drain_cpu_caches(cachep); 2407 2408 check_irq_on(); 2409 for_each_kmem_cache_node(cachep, node, n) { 2410 drain_freelist(cachep, n, slabs_tofree(cachep, n)); 2411 2412 ret += !list_empty(&n->slabs_full) || 2413 !list_empty(&n->slabs_partial); 2414 } 2415 return (ret ? 
1 : 0); 2416 } 2417 2418 int __kmem_cache_shutdown(struct kmem_cache *cachep) 2419 { 2420 int i; 2421 struct kmem_cache_node *n; 2422 int rc = __kmem_cache_shrink(cachep, false); 2423 2424 if (rc) 2425 return rc; 2426 2427 free_percpu(cachep->cpu_cache); 2428 2429 /* NUMA: free the node structures */ 2430 for_each_kmem_cache_node(cachep, i, n) { 2431 kfree(n->shared); 2432 free_alien_cache(n->alien); 2433 kfree(n); 2434 cachep->node[i] = NULL; 2435 } 2436 return 0; 2437 } 2438 2439 /* 2440 * Get the memory for a slab management obj. 2441 * 2442 * For a slab cache whose slab descriptor is off-slab, the 2443 * slab descriptor can't come from the same cache which is being created, 2444 * because if it did, creation of the kmalloc_{dma,}_cache of size 2445 * sizeof(slab descriptor) would have been deferred to this point. 2446 * We would then call down to __kmem_cache_create(), which in turn 2447 * looks up the desired-size cache in the kmalloc_{dma,}_caches. 2448 * This is a "chicken-and-egg" problem. 2449 * 2450 * So the off-slab slab descriptor must come from the kmalloc_{dma,}_caches, 2451 * which are all initialized during kmem_cache_init(). 2452 */ 2453 static void *alloc_slabmgmt(struct kmem_cache *cachep, 2454 struct page *page, int colour_off, 2455 gfp_t local_flags, int nodeid) 2456 { 2457 void *freelist; 2458 void *addr = page_address(page); 2459 2460 if (OFF_SLAB(cachep)) { 2461 /* Slab management obj is off-slab. */ 2462 freelist = kmem_cache_alloc_node(cachep->freelist_cache, 2463 local_flags, nodeid); 2464 if (!freelist) 2465 return NULL; 2466 } else { 2467 freelist = addr + colour_off; 2468 colour_off += cachep->freelist_size; 2469 } 2470 page->active = 0; 2471 page->s_mem = addr + colour_off; 2472 return freelist; 2473 } 2474 2475 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) 2476 { 2477 return ((freelist_idx_t *)page->freelist)[idx]; 2478 } 2479 2480 static inline void set_free_obj(struct page *page, 2481 unsigned int idx, freelist_idx_t val) 2482 { 2483 ((freelist_idx_t *)(page->freelist))[idx] = val; 2484 } 2485 2486 static void cache_init_objs(struct kmem_cache *cachep, 2487 struct page *page) 2488 { 2489 int i; 2490 2491 for (i = 0; i < cachep->num; i++) { 2492 void *objp = index_to_obj(cachep, page, i); 2493 #if DEBUG 2494 /* need to poison the objs? */ 2495 if (cachep->flags & SLAB_POISON) 2496 poison_obj(cachep, objp, POISON_FREE); 2497 if (cachep->flags & SLAB_STORE_USER) 2498 *dbg_userword(cachep, objp) = NULL; 2499 2500 if (cachep->flags & SLAB_RED_ZONE) { 2501 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2502 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2503 } 2504 /* 2505 * Constructors are not allowed to allocate memory from the same 2506 * cache which they are a constructor for. Otherwise, deadlock. 2507 * They must also be threaded.
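 *
 * (For example, the constructor of a hypothetical "foo" cache must not
 * itself call kmem_cache_alloc() on that same cache, and it must be
 * safe to run concurrently for different objects.)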
2508 */ 2509 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2510 cachep->ctor(objp + obj_offset(cachep)); 2511 2512 if (cachep->flags & SLAB_RED_ZONE) { 2513 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2514 slab_error(cachep, "constructor overwrote the" 2515 " end of an object"); 2516 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2517 slab_error(cachep, "constructor overwrote the" 2518 " start of an object"); 2519 } 2520 if ((cachep->size % PAGE_SIZE) == 0 && 2521 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2522 kernel_map_pages(virt_to_page(objp), 2523 cachep->size / PAGE_SIZE, 0); 2524 #else 2525 if (cachep->ctor) 2526 cachep->ctor(objp); 2527 #endif 2528 set_obj_status(page, i, OBJECT_FREE); 2529 set_free_obj(page, i, i); 2530 } 2531 } 2532 2533 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2534 { 2535 if (CONFIG_ZONE_DMA_FLAG) { 2536 if (flags & GFP_DMA) 2537 BUG_ON(!(cachep->allocflags & GFP_DMA)); 2538 else 2539 BUG_ON(cachep->allocflags & GFP_DMA); 2540 } 2541 } 2542 2543 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, 2544 int nodeid) 2545 { 2546 void *objp; 2547 2548 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); 2549 page->active++; 2550 #if DEBUG 2551 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); 2552 #endif 2553 2554 return objp; 2555 } 2556 2557 static void slab_put_obj(struct kmem_cache *cachep, struct page *page, 2558 void *objp, int nodeid) 2559 { 2560 unsigned int objnr = obj_to_index(cachep, page, objp); 2561 #if DEBUG 2562 unsigned int i; 2563 2564 /* Verify that the slab belongs to the intended node */ 2565 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); 2566 2567 /* Verify double free bug */ 2568 for (i = page->active; i < cachep->num; i++) { 2569 if (get_free_obj(page, i) == objnr) { 2570 printk(KERN_ERR "slab: double free detected in cache " 2571 "'%s', objp %p\n", cachep->name, objp); 2572 BUG(); 2573 } 2574 } 2575 #endif 2576 page->active--; 2577 set_free_obj(page, page->active, objnr); 2578 } 2579 2580 /* 2581 * Map pages beginning at addr to the given cache and slab. This is required 2582 * for the slab allocator to be able to lookup the cache and slab of a 2583 * virtual address for kfree, ksize, and slab debugging. 2584 */ 2585 static void slab_map_pages(struct kmem_cache *cache, struct page *page, 2586 void *freelist) 2587 { 2588 page->slab_cache = cache; 2589 page->freelist = freelist; 2590 } 2591 2592 /* 2593 * Grow (by 1) the number of slabs within a cache. This is called by 2594 * kmem_cache_alloc() when there are no active objs left in a cache. 2595 */ 2596 static int cache_grow(struct kmem_cache *cachep, 2597 gfp_t flags, int nodeid, struct page *page) 2598 { 2599 void *freelist; 2600 size_t offset; 2601 gfp_t local_flags; 2602 struct kmem_cache_node *n; 2603 2604 /* 2605 * Be lazy and only check for valid flags here, keeping it out of the 2606 * critical path in kmem_cache_alloc(). 2607 */ 2608 if (unlikely(flags & GFP_SLAB_BUG_MASK)) { 2609 pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK); 2610 BUG(); 2611 } 2612 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2613 2614 /* Take the node list lock to change the colour_next on this node */ 2615 check_irq_off(); 2616 n = get_node(cachep, nodeid); 2617 spin_lock(&n->list_lock); 2618 2619 /* Get colour for the slab, and cal the next value. 
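 * (For example, with a hypothetical colour_off of 64 bytes and a
 * colour of 4, successive slabs place their objects at offsets 0, 64,
 * 128 and 192 within the slab before wrapping back to 0, so identical
 * object indices land on different cache lines.)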
*/ 2620 offset = n->colour_next; 2621 n->colour_next++; 2622 if (n->colour_next >= cachep->colour) 2623 n->colour_next = 0; 2624 spin_unlock(&n->list_lock); 2625 2626 offset *= cachep->colour_off; 2627 2628 if (local_flags & __GFP_WAIT) 2629 local_irq_enable(); 2630 2631 /* 2632 * The test for missing atomic flag is performed here, rather than 2633 * the more obvious place, simply to reduce the critical path length 2634 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2635 * will eventually be caught here (where it matters). 2636 */ 2637 kmem_flagcheck(cachep, flags); 2638 2639 /* 2640 * Get mem for the objs. Attempt to allocate a physical page from 2641 * 'nodeid'. 2642 */ 2643 if (!page) 2644 page = kmem_getpages(cachep, local_flags, nodeid); 2645 if (!page) 2646 goto failed; 2647 2648 /* Get slab management. */ 2649 freelist = alloc_slabmgmt(cachep, page, offset, 2650 local_flags & ~GFP_CONSTRAINT_MASK, nodeid); 2651 if (!freelist) 2652 goto opps1; 2653 2654 slab_map_pages(cachep, page, freelist); 2655 2656 cache_init_objs(cachep, page); 2657 2658 if (local_flags & __GFP_WAIT) 2659 local_irq_disable(); 2660 check_irq_off(); 2661 spin_lock(&n->list_lock); 2662 2663 /* Make slab active. */ 2664 list_add_tail(&page->lru, &(n->slabs_free)); 2665 STATS_INC_GROWN(cachep); 2666 n->free_objects += cachep->num; 2667 spin_unlock(&n->list_lock); 2668 return 1; 2669 opps1: 2670 kmem_freepages(cachep, page); 2671 failed: 2672 if (local_flags & __GFP_WAIT) 2673 local_irq_disable(); 2674 return 0; 2675 } 2676 2677 #if DEBUG 2678 2679 /* 2680 * Perform extra freeing checks: 2681 * - detect bad pointers. 2682 * - POISON/RED_ZONE checking 2683 */ 2684 static void kfree_debugcheck(const void *objp) 2685 { 2686 if (!virt_addr_valid(objp)) { 2687 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2688 (unsigned long)objp); 2689 BUG(); 2690 } 2691 } 2692 2693 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2694 { 2695 unsigned long long redzone1, redzone2; 2696 2697 redzone1 = *dbg_redzone1(cache, obj); 2698 redzone2 = *dbg_redzone2(cache, obj); 2699 2700 /* 2701 * Redzone is ok. 
2702 */ 2703 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2704 return; 2705 2706 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2707 slab_error(cache, "double free detected"); 2708 else 2709 slab_error(cache, "memory outside object was overwritten"); 2710 2711 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2712 obj, redzone1, redzone2); 2713 } 2714 2715 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2716 unsigned long caller) 2717 { 2718 unsigned int objnr; 2719 struct page *page; 2720 2721 BUG_ON(virt_to_cache(objp) != cachep); 2722 2723 objp -= obj_offset(cachep); 2724 kfree_debugcheck(objp); 2725 page = virt_to_head_page(objp); 2726 2727 if (cachep->flags & SLAB_RED_ZONE) { 2728 verify_redzone_free(cachep, objp); 2729 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2730 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2731 } 2732 if (cachep->flags & SLAB_STORE_USER) 2733 *dbg_userword(cachep, objp) = (void *)caller; 2734 2735 objnr = obj_to_index(cachep, page, objp); 2736 2737 BUG_ON(objnr >= cachep->num); 2738 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2739 2740 set_obj_status(page, objnr, OBJECT_FREE); 2741 if (cachep->flags & SLAB_POISON) { 2742 #ifdef CONFIG_DEBUG_PAGEALLOC 2743 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2744 store_stackinfo(cachep, objp, caller); 2745 kernel_map_pages(virt_to_page(objp), 2746 cachep->size / PAGE_SIZE, 0); 2747 } else { 2748 poison_obj(cachep, objp, POISON_FREE); 2749 } 2750 #else 2751 poison_obj(cachep, objp, POISON_FREE); 2752 #endif 2753 } 2754 return objp; 2755 } 2756 2757 #else 2758 #define kfree_debugcheck(x) do { } while(0) 2759 #define cache_free_debugcheck(x,objp,z) (objp) 2760 #endif 2761 2762 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, 2763 bool force_refill) 2764 { 2765 int batchcount; 2766 struct kmem_cache_node *n; 2767 struct array_cache *ac; 2768 int node; 2769 2770 check_irq_off(); 2771 node = numa_mem_id(); 2772 if (unlikely(force_refill)) 2773 goto force_grow; 2774 retry: 2775 ac = cpu_cache_get(cachep); 2776 batchcount = ac->batchcount; 2777 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2778 /* 2779 * If there was little recent activity on this cache, then 2780 * perform only a partial refill. Otherwise we could generate 2781 * refill bouncing. 2782 */ 2783 batchcount = BATCHREFILL_LIMIT; 2784 } 2785 n = get_node(cachep, node); 2786 2787 BUG_ON(ac->avail > 0 || !n); 2788 spin_lock(&n->list_lock); 2789 2790 /* See if we can refill from the shared array */ 2791 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { 2792 n->shared->touched = 1; 2793 goto alloc_done; 2794 } 2795 2796 while (batchcount > 0) { 2797 struct list_head *entry; 2798 struct page *page; 2799 /* Get slab alloc is to come from. */ 2800 entry = n->slabs_partial.next; 2801 if (entry == &n->slabs_partial) { 2802 n->free_touched = 1; 2803 entry = n->slabs_free.next; 2804 if (entry == &n->slabs_free) 2805 goto must_grow; 2806 } 2807 2808 page = list_entry(entry, struct page, lru); 2809 check_spinlock_acquired(cachep); 2810 2811 /* 2812 * The slab was either on partial or free list so 2813 * there must be at least one object available for 2814 * allocation. 
2815 */ 2816 BUG_ON(page->active >= cachep->num); 2817 2818 while (page->active < cachep->num && batchcount--) { 2819 STATS_INC_ALLOCED(cachep); 2820 STATS_INC_ACTIVE(cachep); 2821 STATS_SET_HIGH(cachep); 2822 2823 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, 2824 node)); 2825 } 2826 2827 /* move slabp to correct slabp list: */ 2828 list_del(&page->lru); 2829 if (page->active == cachep->num) 2830 list_add(&page->lru, &n->slabs_full); 2831 else 2832 list_add(&page->lru, &n->slabs_partial); 2833 } 2834 2835 must_grow: 2836 n->free_objects -= ac->avail; 2837 alloc_done: 2838 spin_unlock(&n->list_lock); 2839 2840 if (unlikely(!ac->avail)) { 2841 int x; 2842 force_grow: 2843 x = cache_grow(cachep, gfp_exact_node(flags), node, NULL); 2844 2845 /* cache_grow can reenable interrupts, then ac could change. */ 2846 ac = cpu_cache_get(cachep); 2847 node = numa_mem_id(); 2848 2849 /* no objects in sight? abort */ 2850 if (!x && (ac->avail == 0 || force_refill)) 2851 return NULL; 2852 2853 if (!ac->avail) /* objects refilled by interrupt? */ 2854 goto retry; 2855 } 2856 ac->touched = 1; 2857 2858 return ac_get_obj(cachep, ac, flags, force_refill); 2859 } 2860 2861 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 2862 gfp_t flags) 2863 { 2864 might_sleep_if(flags & __GFP_WAIT); 2865 #if DEBUG 2866 kmem_flagcheck(cachep, flags); 2867 #endif 2868 } 2869 2870 #if DEBUG 2871 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2872 gfp_t flags, void *objp, unsigned long caller) 2873 { 2874 struct page *page; 2875 2876 if (!objp) 2877 return objp; 2878 if (cachep->flags & SLAB_POISON) { 2879 #ifdef CONFIG_DEBUG_PAGEALLOC 2880 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 2881 kernel_map_pages(virt_to_page(objp), 2882 cachep->size / PAGE_SIZE, 1); 2883 else 2884 check_poison_obj(cachep, objp); 2885 #else 2886 check_poison_obj(cachep, objp); 2887 #endif 2888 poison_obj(cachep, objp, POISON_INUSE); 2889 } 2890 if (cachep->flags & SLAB_STORE_USER) 2891 *dbg_userword(cachep, objp) = (void *)caller; 2892 2893 if (cachep->flags & SLAB_RED_ZONE) { 2894 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 2895 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2896 slab_error(cachep, "double free, or memory outside" 2897 " object was overwritten"); 2898 printk(KERN_ERR 2899 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 2900 objp, *dbg_redzone1(cachep, objp), 2901 *dbg_redzone2(cachep, objp)); 2902 } 2903 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2904 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2905 } 2906 2907 page = virt_to_head_page(objp); 2908 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); 2909 objp += obj_offset(cachep); 2910 if (cachep->ctor && cachep->flags & SLAB_POISON) 2911 cachep->ctor(objp); 2912 if (ARCH_SLAB_MINALIGN && 2913 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 2914 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 2915 objp, (int)ARCH_SLAB_MINALIGN); 2916 } 2917 return objp; 2918 } 2919 #else 2920 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2921 #endif 2922 2923 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) 2924 { 2925 if (unlikely(cachep == kmem_cache)) 2926 return false; 2927 2928 return should_failslab(cachep->object_size, flags, cachep->flags); 2929 } 2930 2931 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 2932 { 2933 void *objp; 2934 struct array_cache *ac; 2935 bool force_refill = false; 2936 2937 check_irq_off(); 2938 2939 ac = 
cpu_cache_get(cachep); 2940 if (likely(ac->avail)) { 2941 ac->touched = 1; 2942 objp = ac_get_obj(cachep, ac, flags, false); 2943 2944 /* 2945 * Allow for the possibility that all available objects are 2946 * disallowed by the current flags 2947 */ 2948 if (objp) { 2949 STATS_INC_ALLOCHIT(cachep); 2950 goto out; 2951 } 2952 force_refill = true; 2953 } 2954 2955 STATS_INC_ALLOCMISS(cachep); 2956 objp = cache_alloc_refill(cachep, flags, force_refill); 2957 /* 2958 * the 'ac' may be updated by cache_alloc_refill(), 2959 * and kmemleak_erase() requires its correct value. 2960 */ 2961 ac = cpu_cache_get(cachep); 2962 2963 out: 2964 /* 2965 * To avoid a false negative, if an object that is in one of the 2966 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 2967 * treat the array pointers as a reference to the object. 2968 */ 2969 if (objp) 2970 kmemleak_erase(&ac->entry[ac->avail]); 2971 return objp; 2972 } 2973 2974 #ifdef CONFIG_NUMA 2975 /* 2976 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set. 2977 * 2978 * If we are in_interrupt, then process context, including cpusets and 2979 * mempolicy, may not apply and should not be used for allocation policy. 2980 */ 2981 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 2982 { 2983 int nid_alloc, nid_here; 2984 2985 if (in_interrupt() || (flags & __GFP_THISNODE)) 2986 return NULL; 2987 nid_alloc = nid_here = numa_mem_id(); 2988 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 2989 nid_alloc = cpuset_slab_spread_node(); 2990 else if (current->mempolicy) 2991 nid_alloc = mempolicy_slab_node(); 2992 if (nid_alloc != nid_here) 2993 return ____cache_alloc_node(cachep, flags, nid_alloc); 2994 return NULL; 2995 } 2996 2997 /* 2998 * Fallback function if there was no memory available and no objects on a 2999 * certain node and fallback is permitted. First we scan all the 3000 * available nodes for available objects. If that fails then we 3001 * perform an allocation without specifying a node. This allows the page 3002 * allocator to do its reclaim / fallback magic. We then insert the 3003 * slab into the proper nodelist and then allocate from it. 3004 */ 3005 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3006 { 3007 struct zonelist *zonelist; 3008 gfp_t local_flags; 3009 struct zoneref *z; 3010 struct zone *zone; 3011 enum zone_type high_zoneidx = gfp_zone(flags); 3012 void *obj = NULL; 3013 int nid; 3014 unsigned int cpuset_mems_cookie; 3015 3016 if (flags & __GFP_THISNODE) 3017 return NULL; 3018 3019 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 3020 3021 retry_cpuset: 3022 cpuset_mems_cookie = read_mems_allowed_begin(); 3023 zonelist = node_zonelist(mempolicy_slab_node(), flags); 3024 3025 retry: 3026 /* 3027 * Look through allowed nodes for objects available 3028 * from existing per node queues. 3029 */ 3030 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3031 nid = zone_to_nid(zone); 3032 3033 if (cpuset_zone_allowed(zone, flags) && 3034 get_node(cache, nid) && 3035 get_node(cache, nid)->free_objects) { 3036 obj = ____cache_alloc_node(cache, 3037 gfp_exact_node(flags), nid); 3038 if (obj) 3039 break; 3040 } 3041 } 3042 3043 if (!obj) { 3044 /* 3045 * This allocation will be performed within the constraints 3046 * of the current cpuset / memory policy requirements. 3047 * We may trigger various forms of reclaim on the allowed 3048 * set and go into memory reserves if necessary.
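 *
 * (Put differently: the loop above only reuses objects already sitting
 * on allowed nodes; from here we ask the page allocator for a fresh
 * page anywhere the cpuset / mempolicy permits, grow the cache on
 * whatever node that page landed on, and then retry the node-local
 * allocation.)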
3049 */ 3050 struct page *page; 3051 3052 if (local_flags & __GFP_WAIT) 3053 local_irq_enable(); 3054 kmem_flagcheck(cache, flags); 3055 page = kmem_getpages(cache, local_flags, numa_mem_id()); 3056 if (local_flags & __GFP_WAIT) 3057 local_irq_disable(); 3058 if (page) { 3059 /* 3060 * Insert into the appropriate per node queues 3061 */ 3062 nid = page_to_nid(page); 3063 if (cache_grow(cache, flags, nid, page)) { 3064 obj = ____cache_alloc_node(cache, 3065 gfp_exact_node(flags), nid); 3066 if (!obj) 3067 /* 3068 * Another processor may allocate the 3069 * objects in the slab since we are 3070 * not holding any locks. 3071 */ 3072 goto retry; 3073 } else { 3074 /* cache_grow already freed obj */ 3075 obj = NULL; 3076 } 3077 } 3078 } 3079 3080 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) 3081 goto retry_cpuset; 3082 return obj; 3083 } 3084 3085 /* 3086 * A interface to enable slab creation on nodeid 3087 */ 3088 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3089 int nodeid) 3090 { 3091 struct list_head *entry; 3092 struct page *page; 3093 struct kmem_cache_node *n; 3094 void *obj; 3095 int x; 3096 3097 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); 3098 n = get_node(cachep, nodeid); 3099 BUG_ON(!n); 3100 3101 retry: 3102 check_irq_off(); 3103 spin_lock(&n->list_lock); 3104 entry = n->slabs_partial.next; 3105 if (entry == &n->slabs_partial) { 3106 n->free_touched = 1; 3107 entry = n->slabs_free.next; 3108 if (entry == &n->slabs_free) 3109 goto must_grow; 3110 } 3111 3112 page = list_entry(entry, struct page, lru); 3113 check_spinlock_acquired_node(cachep, nodeid); 3114 3115 STATS_INC_NODEALLOCS(cachep); 3116 STATS_INC_ACTIVE(cachep); 3117 STATS_SET_HIGH(cachep); 3118 3119 BUG_ON(page->active == cachep->num); 3120 3121 obj = slab_get_obj(cachep, page, nodeid); 3122 n->free_objects--; 3123 /* move slabp to correct slabp list: */ 3124 list_del(&page->lru); 3125 3126 if (page->active == cachep->num) 3127 list_add(&page->lru, &n->slabs_full); 3128 else 3129 list_add(&page->lru, &n->slabs_partial); 3130 3131 spin_unlock(&n->list_lock); 3132 goto done; 3133 3134 must_grow: 3135 spin_unlock(&n->list_lock); 3136 x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL); 3137 if (x) 3138 goto retry; 3139 3140 return fallback_alloc(cachep, flags); 3141 3142 done: 3143 return obj; 3144 } 3145 3146 static __always_inline void * 3147 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3148 unsigned long caller) 3149 { 3150 unsigned long save_flags; 3151 void *ptr; 3152 int slab_node = numa_mem_id(); 3153 3154 flags &= gfp_allowed_mask; 3155 3156 lockdep_trace_alloc(flags); 3157 3158 if (slab_should_failslab(cachep, flags)) 3159 return NULL; 3160 3161 cachep = memcg_kmem_get_cache(cachep, flags); 3162 3163 cache_alloc_debugcheck_before(cachep, flags); 3164 local_irq_save(save_flags); 3165 3166 if (nodeid == NUMA_NO_NODE) 3167 nodeid = slab_node; 3168 3169 if (unlikely(!get_node(cachep, nodeid))) { 3170 /* Node not bootstrapped yet */ 3171 ptr = fallback_alloc(cachep, flags); 3172 goto out; 3173 } 3174 3175 if (nodeid == slab_node) { 3176 /* 3177 * Use the locally cached objects if possible. 3178 * However ____cache_alloc does not allow fallback 3179 * to other nodes. It may fail while we still have 3180 * objects on other nodes available. 
3181 */ 3182 ptr = ____cache_alloc(cachep, flags); 3183 if (ptr) 3184 goto out; 3185 } 3186 /* ___cache_alloc_node can fall back to other nodes */ 3187 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3188 out: 3189 local_irq_restore(save_flags); 3190 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3191 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, 3192 flags); 3193 3194 if (likely(ptr)) { 3195 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); 3196 if (unlikely(flags & __GFP_ZERO)) 3197 memset(ptr, 0, cachep->object_size); 3198 } 3199 3200 memcg_kmem_put_cache(cachep); 3201 return ptr; 3202 } 3203 3204 static __always_inline void * 3205 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3206 { 3207 void *objp; 3208 3209 if (current->mempolicy || cpuset_do_slab_mem_spread()) { 3210 objp = alternate_node_alloc(cache, flags); 3211 if (objp) 3212 goto out; 3213 } 3214 objp = ____cache_alloc(cache, flags); 3215 3216 /* 3217 * We may just have run out of memory on the local node. 3218 * ____cache_alloc_node() knows how to locate memory on other nodes 3219 */ 3220 if (!objp) 3221 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); 3222 3223 out: 3224 return objp; 3225 } 3226 #else 3227 3228 static __always_inline void * 3229 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3230 { 3231 return ____cache_alloc(cachep, flags); 3232 } 3233 3234 #endif /* CONFIG_NUMA */ 3235 3236 static __always_inline void * 3237 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3238 { 3239 unsigned long save_flags; 3240 void *objp; 3241 3242 flags &= gfp_allowed_mask; 3243 3244 lockdep_trace_alloc(flags); 3245 3246 if (slab_should_failslab(cachep, flags)) 3247 return NULL; 3248 3249 cachep = memcg_kmem_get_cache(cachep, flags); 3250 3251 cache_alloc_debugcheck_before(cachep, flags); 3252 local_irq_save(save_flags); 3253 objp = __do_cache_alloc(cachep, flags); 3254 local_irq_restore(save_flags); 3255 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3256 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, 3257 flags); 3258 prefetchw(objp); 3259 3260 if (likely(objp)) { 3261 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); 3262 if (unlikely(flags & __GFP_ZERO)) 3263 memset(objp, 0, cachep->object_size); 3264 } 3265 3266 memcg_kmem_put_cache(cachep); 3267 return objp; 3268 } 3269 3270 /* 3271 * Caller needs to acquire correct kmem_cache_node's list_lock 3272 * @list: List of detached free slabs should be freed by caller 3273 */ 3274 static void free_block(struct kmem_cache *cachep, void **objpp, 3275 int nr_objects, int node, struct list_head *list) 3276 { 3277 int i; 3278 struct kmem_cache_node *n = get_node(cachep, node); 3279 3280 for (i = 0; i < nr_objects; i++) { 3281 void *objp; 3282 struct page *page; 3283 3284 clear_obj_pfmemalloc(&objpp[i]); 3285 objp = objpp[i]; 3286 3287 page = virt_to_head_page(objp); 3288 list_del(&page->lru); 3289 check_spinlock_acquired_node(cachep, node); 3290 slab_put_obj(cachep, page, objp, node); 3291 STATS_DEC_ACTIVE(cachep); 3292 n->free_objects++; 3293 3294 /* fixup slab chains */ 3295 if (page->active == 0) { 3296 if (n->free_objects > n->free_limit) { 3297 n->free_objects -= cachep->num; 3298 list_add_tail(&page->lru, list); 3299 } else { 3300 list_add(&page->lru, &n->slabs_free); 3301 } 3302 } else { 3303 /* Unconditionally move a slab to the end of the 3304 * partial list on free - maximum time for the 3305 * other objects to be freed, 
too. 3306 */ 3307 list_add_tail(&page->lru, &n->slabs_partial); 3308 } 3309 } 3310 } 3311 3312 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3313 { 3314 int batchcount; 3315 struct kmem_cache_node *n; 3316 int node = numa_mem_id(); 3317 LIST_HEAD(list); 3318 3319 batchcount = ac->batchcount; 3320 #if DEBUG 3321 BUG_ON(!batchcount || batchcount > ac->avail); 3322 #endif 3323 check_irq_off(); 3324 n = get_node(cachep, node); 3325 spin_lock(&n->list_lock); 3326 if (n->shared) { 3327 struct array_cache *shared_array = n->shared; 3328 int max = shared_array->limit - shared_array->avail; 3329 if (max) { 3330 if (batchcount > max) 3331 batchcount = max; 3332 memcpy(&(shared_array->entry[shared_array->avail]), 3333 ac->entry, sizeof(void *) * batchcount); 3334 shared_array->avail += batchcount; 3335 goto free_done; 3336 } 3337 } 3338 3339 free_block(cachep, ac->entry, batchcount, node, &list); 3340 free_done: 3341 #if STATS 3342 { 3343 int i = 0; 3344 struct list_head *p; 3345 3346 p = n->slabs_free.next; 3347 while (p != &(n->slabs_free)) { 3348 struct page *page; 3349 3350 page = list_entry(p, struct page, lru); 3351 BUG_ON(page->active); 3352 3353 i++; 3354 p = p->next; 3355 } 3356 STATS_SET_FREEABLE(cachep, i); 3357 } 3358 #endif 3359 spin_unlock(&n->list_lock); 3360 slabs_destroy(cachep, &list); 3361 ac->avail -= batchcount; 3362 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3363 } 3364 3365 /* 3366 * Release an obj back to its cache. If the obj has a constructed state, it must 3367 * be in this state _before_ it is released. Called with disabled ints. 3368 */ 3369 static inline void __cache_free(struct kmem_cache *cachep, void *objp, 3370 unsigned long caller) 3371 { 3372 struct array_cache *ac = cpu_cache_get(cachep); 3373 3374 check_irq_off(); 3375 kmemleak_free_recursive(objp, cachep->flags); 3376 objp = cache_free_debugcheck(cachep, objp, caller); 3377 3378 kmemcheck_slab_free(cachep, objp, cachep->object_size); 3379 3380 /* 3381 * Skip calling cache_free_alien() when the platform is not numa. 3382 * This will avoid cache misses that happen while accessing slabp (which 3383 * is per page memory reference) to get nodeid. Instead use a global 3384 * variable to skip the call, which is mostly likely to be present in 3385 * the cache. 3386 */ 3387 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) 3388 return; 3389 3390 if (ac->avail < ac->limit) { 3391 STATS_INC_FREEHIT(cachep); 3392 } else { 3393 STATS_INC_FREEMISS(cachep); 3394 cache_flusharray(cachep, ac); 3395 } 3396 3397 ac_put_obj(cachep, ac, objp); 3398 } 3399 3400 /** 3401 * kmem_cache_alloc - Allocate an object 3402 * @cachep: The cache to allocate from. 3403 * @flags: See kmalloc(). 3404 * 3405 * Allocate an object from this cache. The flags are only relevant 3406 * if the cache has no available objects. 
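 *
 * Example (an illustrative sketch; "foo_cachep" stands for a
 * hypothetical cache created earlier with kmem_cache_create()):
 *
 *	struct foo *p = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, p);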
3407 */ 3408 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3409 { 3410 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3411 3412 trace_kmem_cache_alloc(_RET_IP_, ret, 3413 cachep->object_size, cachep->size, flags); 3414 3415 return ret; 3416 } 3417 EXPORT_SYMBOL(kmem_cache_alloc); 3418 3419 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 3420 { 3421 __kmem_cache_free_bulk(s, size, p); 3422 } 3423 EXPORT_SYMBOL(kmem_cache_free_bulk); 3424 3425 bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3426 void **p) 3427 { 3428 return __kmem_cache_alloc_bulk(s, flags, size, p); 3429 } 3430 EXPORT_SYMBOL(kmem_cache_alloc_bulk); 3431 3432 #ifdef CONFIG_TRACING 3433 void * 3434 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3435 { 3436 void *ret; 3437 3438 ret = slab_alloc(cachep, flags, _RET_IP_); 3439 3440 trace_kmalloc(_RET_IP_, ret, 3441 size, cachep->size, flags); 3442 return ret; 3443 } 3444 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3445 #endif 3446 3447 #ifdef CONFIG_NUMA 3448 /** 3449 * kmem_cache_alloc_node - Allocate an object on the specified node 3450 * @cachep: The cache to allocate from. 3451 * @flags: See kmalloc(). 3452 * @nodeid: node number of the target node. 3453 * 3454 * Identical to kmem_cache_alloc but it will allocate memory on the given 3455 * node, which can improve the performance for cpu bound structures. 3456 * 3457 * Fallback to other node is possible if __GFP_THISNODE is not set. 3458 */ 3459 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3460 { 3461 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3462 3463 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3464 cachep->object_size, cachep->size, 3465 flags, nodeid); 3466 3467 return ret; 3468 } 3469 EXPORT_SYMBOL(kmem_cache_alloc_node); 3470 3471 #ifdef CONFIG_TRACING 3472 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3473 gfp_t flags, 3474 int nodeid, 3475 size_t size) 3476 { 3477 void *ret; 3478 3479 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3480 3481 trace_kmalloc_node(_RET_IP_, ret, 3482 size, cachep->size, 3483 flags, nodeid); 3484 return ret; 3485 } 3486 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3487 #endif 3488 3489 static __always_inline void * 3490 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3491 { 3492 struct kmem_cache *cachep; 3493 3494 cachep = kmalloc_slab(size, flags); 3495 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3496 return cachep; 3497 return kmem_cache_alloc_node_trace(cachep, flags, node, size); 3498 } 3499 3500 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3501 { 3502 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3503 } 3504 EXPORT_SYMBOL(__kmalloc_node); 3505 3506 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3507 int node, unsigned long caller) 3508 { 3509 return __do_kmalloc_node(size, flags, node, caller); 3510 } 3511 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3512 #endif /* CONFIG_NUMA */ 3513 3514 /** 3515 * __do_kmalloc - allocate memory 3516 * @size: how many bytes of memory are required. 3517 * @flags: the type of memory to allocate (see kmalloc). 
3518 * @caller: function caller for debug tracking of the caller 3519 */ 3520 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3521 unsigned long caller) 3522 { 3523 struct kmem_cache *cachep; 3524 void *ret; 3525 3526 cachep = kmalloc_slab(size, flags); 3527 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3528 return cachep; 3529 ret = slab_alloc(cachep, flags, caller); 3530 3531 trace_kmalloc(caller, ret, 3532 size, cachep->size, flags); 3533 3534 return ret; 3535 } 3536 3537 void *__kmalloc(size_t size, gfp_t flags) 3538 { 3539 return __do_kmalloc(size, flags, _RET_IP_); 3540 } 3541 EXPORT_SYMBOL(__kmalloc); 3542 3543 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3544 { 3545 return __do_kmalloc(size, flags, caller); 3546 } 3547 EXPORT_SYMBOL(__kmalloc_track_caller); 3548 3549 /** 3550 * kmem_cache_free - Deallocate an object 3551 * @cachep: The cache the allocation was from. 3552 * @objp: The previously allocated object. 3553 * 3554 * Free an object which was previously allocated from this 3555 * cache. 3556 */ 3557 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3558 { 3559 unsigned long flags; 3560 cachep = cache_from_obj(cachep, objp); 3561 if (!cachep) 3562 return; 3563 3564 local_irq_save(flags); 3565 debug_check_no_locks_freed(objp, cachep->object_size); 3566 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3567 debug_check_no_obj_freed(objp, cachep->object_size); 3568 __cache_free(cachep, objp, _RET_IP_); 3569 local_irq_restore(flags); 3570 3571 trace_kmem_cache_free(_RET_IP_, objp); 3572 } 3573 EXPORT_SYMBOL(kmem_cache_free); 3574 3575 /** 3576 * kfree - free previously allocated memory 3577 * @objp: pointer returned by kmalloc. 3578 * 3579 * If @objp is NULL, no operation is performed. 3580 * 3581 * Don't free memory not originally allocated by kmalloc() 3582 * or you will run into trouble. 3583 */ 3584 void kfree(const void *objp) 3585 { 3586 struct kmem_cache *c; 3587 unsigned long flags; 3588 3589 trace_kfree(_RET_IP_, objp); 3590 3591 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3592 return; 3593 local_irq_save(flags); 3594 kfree_debugcheck(objp); 3595 c = virt_to_cache(objp); 3596 debug_check_no_locks_freed(objp, c->object_size); 3597 3598 debug_check_no_obj_freed(objp, c->object_size); 3599 __cache_free(c, (void *)objp, _RET_IP_); 3600 local_irq_restore(flags); 3601 } 3602 EXPORT_SYMBOL(kfree); 3603 3604 /* 3605 * This initializes kmem_cache_node or resizes various caches for all nodes. 
3606 */ 3607 static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) 3608 { 3609 int node; 3610 struct kmem_cache_node *n; 3611 struct array_cache *new_shared; 3612 struct alien_cache **new_alien = NULL; 3613 3614 for_each_online_node(node) { 3615 3616 if (use_alien_caches) { 3617 new_alien = alloc_alien_cache(node, cachep->limit, gfp); 3618 if (!new_alien) 3619 goto fail; 3620 } 3621 3622 new_shared = NULL; 3623 if (cachep->shared) { 3624 new_shared = alloc_arraycache(node, 3625 cachep->shared*cachep->batchcount, 3626 0xbaadf00d, gfp); 3627 if (!new_shared) { 3628 free_alien_cache(new_alien); 3629 goto fail; 3630 } 3631 } 3632 3633 n = get_node(cachep, node); 3634 if (n) { 3635 struct array_cache *shared = n->shared; 3636 LIST_HEAD(list); 3637 3638 spin_lock_irq(&n->list_lock); 3639 3640 if (shared) 3641 free_block(cachep, shared->entry, 3642 shared->avail, node, &list); 3643 3644 n->shared = new_shared; 3645 if (!n->alien) { 3646 n->alien = new_alien; 3647 new_alien = NULL; 3648 } 3649 n->free_limit = (1 + nr_cpus_node(node)) * 3650 cachep->batchcount + cachep->num; 3651 spin_unlock_irq(&n->list_lock); 3652 slabs_destroy(cachep, &list); 3653 kfree(shared); 3654 free_alien_cache(new_alien); 3655 continue; 3656 } 3657 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); 3658 if (!n) { 3659 free_alien_cache(new_alien); 3660 kfree(new_shared); 3661 goto fail; 3662 } 3663 3664 kmem_cache_node_init(n); 3665 n->next_reap = jiffies + REAPTIMEOUT_NODE + 3666 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 3667 n->shared = new_shared; 3668 n->alien = new_alien; 3669 n->free_limit = (1 + nr_cpus_node(node)) * 3670 cachep->batchcount + cachep->num; 3671 cachep->node[node] = n; 3672 } 3673 return 0; 3674 3675 fail: 3676 if (!cachep->list.next) { 3677 /* Cache is not active yet. 
Roll back what we did */ 3678 node--; 3679 while (node >= 0) { 3680 n = get_node(cachep, node); 3681 if (n) { 3682 kfree(n->shared); 3683 free_alien_cache(n->alien); 3684 kfree(n); 3685 cachep->node[node] = NULL; 3686 } 3687 node--; 3688 } 3689 } 3690 return -ENOMEM; 3691 } 3692 3693 /* Always called with the slab_mutex held */ 3694 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, 3695 int batchcount, int shared, gfp_t gfp) 3696 { 3697 struct array_cache __percpu *cpu_cache, *prev; 3698 int cpu; 3699 3700 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); 3701 if (!cpu_cache) 3702 return -ENOMEM; 3703 3704 prev = cachep->cpu_cache; 3705 cachep->cpu_cache = cpu_cache; 3706 kick_all_cpus_sync(); 3707 3708 check_irq_on(); 3709 cachep->batchcount = batchcount; 3710 cachep->limit = limit; 3711 cachep->shared = shared; 3712 3713 if (!prev) 3714 goto alloc_node; 3715 3716 for_each_online_cpu(cpu) { 3717 LIST_HEAD(list); 3718 int node; 3719 struct kmem_cache_node *n; 3720 struct array_cache *ac = per_cpu_ptr(prev, cpu); 3721 3722 node = cpu_to_mem(cpu); 3723 n = get_node(cachep, node); 3724 spin_lock_irq(&n->list_lock); 3725 free_block(cachep, ac->entry, ac->avail, node, &list); 3726 spin_unlock_irq(&n->list_lock); 3727 slabs_destroy(cachep, &list); 3728 } 3729 free_percpu(prev); 3730 3731 alloc_node: 3732 return alloc_kmem_cache_node(cachep, gfp); 3733 } 3734 3735 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3736 int batchcount, int shared, gfp_t gfp) 3737 { 3738 int ret; 3739 struct kmem_cache *c; 3740 3741 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3742 3743 if (slab_state < FULL) 3744 return ret; 3745 3746 if ((ret < 0) || !is_root_cache(cachep)) 3747 return ret; 3748 3749 lockdep_assert_held(&slab_mutex); 3750 for_each_memcg_cache(c, cachep) { 3751 /* return value determined by the root cache only */ 3752 __do_tune_cpucache(c, limit, batchcount, shared, gfp); 3753 } 3754 3755 return ret; 3756 } 3757 3758 /* Called with slab_mutex held always */ 3759 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3760 { 3761 int err; 3762 int limit = 0; 3763 int shared = 0; 3764 int batchcount = 0; 3765 3766 if (!is_root_cache(cachep)) { 3767 struct kmem_cache *root = memcg_root_cache(cachep); 3768 limit = root->limit; 3769 shared = root->shared; 3770 batchcount = root->batchcount; 3771 } 3772 3773 if (limit && shared && batchcount) 3774 goto skip_setup; 3775 /* 3776 * The head array serves three purposes: 3777 * - create a LIFO ordering, i.e. return objects that are cache-warm 3778 * - reduce the number of spinlock operations. 3779 * - reduce the number of linked list operations on the slab and 3780 * bufctl chains: array operations are cheaper. 3781 * The numbers are guessed, we should auto-tune as described by 3782 * Bonwick. 3783 */ 3784 if (cachep->size > 131072) 3785 limit = 1; 3786 else if (cachep->size > PAGE_SIZE) 3787 limit = 8; 3788 else if (cachep->size > 1024) 3789 limit = 24; 3790 else if (cachep->size > 256) 3791 limit = 54; 3792 else 3793 limit = 120; 3794 3795 /* 3796 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3797 * allocation behaviour: Most allocs on one cpu, most free operations 3798 * on another cpu. For these cases, an efficient object passing between 3799 * cpus is necessary. This is provided by a shared array. The array 3800 * replaces Bonwick's magazine layer. 3801 * On uniprocessor, it's functionally equivalent (but less efficient) 3802 * to a larger limit. 
Thus disabled by default. 3803 */ 3804 shared = 0; 3805 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) 3806 shared = 8; 3807 3808 #if DEBUG 3809 /* 3810 * With debugging enabled, large batchcount lead to excessively long 3811 * periods with disabled local interrupts. Limit the batchcount 3812 */ 3813 if (limit > 32) 3814 limit = 32; 3815 #endif 3816 batchcount = (limit + 1) / 2; 3817 skip_setup: 3818 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3819 if (err) 3820 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3821 cachep->name, -err); 3822 return err; 3823 } 3824 3825 /* 3826 * Drain an array if it contains any elements taking the node lock only if 3827 * necessary. Note that the node listlock also protects the array_cache 3828 * if drain_array() is used on the shared array. 3829 */ 3830 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 3831 struct array_cache *ac, int force, int node) 3832 { 3833 LIST_HEAD(list); 3834 int tofree; 3835 3836 if (!ac || !ac->avail) 3837 return; 3838 if (ac->touched && !force) { 3839 ac->touched = 0; 3840 } else { 3841 spin_lock_irq(&n->list_lock); 3842 if (ac->avail) { 3843 tofree = force ? ac->avail : (ac->limit + 4) / 5; 3844 if (tofree > ac->avail) 3845 tofree = (ac->avail + 1) / 2; 3846 free_block(cachep, ac->entry, tofree, node, &list); 3847 ac->avail -= tofree; 3848 memmove(ac->entry, &(ac->entry[tofree]), 3849 sizeof(void *) * ac->avail); 3850 } 3851 spin_unlock_irq(&n->list_lock); 3852 slabs_destroy(cachep, &list); 3853 } 3854 } 3855 3856 /** 3857 * cache_reap - Reclaim memory from caches. 3858 * @w: work descriptor 3859 * 3860 * Called from workqueue/eventd every few seconds. 3861 * Purpose: 3862 * - clear the per-cpu caches for this CPU. 3863 * - return freeable pages to the main free memory pool. 3864 * 3865 * If we cannot acquire the cache chain mutex then just give up - we'll try 3866 * again on the next iteration. 3867 */ 3868 static void cache_reap(struct work_struct *w) 3869 { 3870 struct kmem_cache *searchp; 3871 struct kmem_cache_node *n; 3872 int node = numa_mem_id(); 3873 struct delayed_work *work = to_delayed_work(w); 3874 3875 if (!mutex_trylock(&slab_mutex)) 3876 /* Give up. Setup the next iteration. */ 3877 goto out; 3878 3879 list_for_each_entry(searchp, &slab_caches, list) { 3880 check_irq_on(); 3881 3882 /* 3883 * We only take the node lock if absolutely necessary and we 3884 * have established with reasonable certainty that 3885 * we can do some work if the lock was obtained. 3886 */ 3887 n = get_node(searchp, node); 3888 3889 reap_alien(searchp, n); 3890 3891 drain_array(searchp, n, cpu_cache_get(searchp), 0, node); 3892 3893 /* 3894 * These are racy checks but it does not matter 3895 * if we skip one check or scan twice. 
3896 */ 3897 if (time_after(n->next_reap, jiffies)) 3898 goto next; 3899 3900 n->next_reap = jiffies + REAPTIMEOUT_NODE; 3901 3902 drain_array(searchp, n, n->shared, 0, node); 3903 3904 if (n->free_touched) 3905 n->free_touched = 0; 3906 else { 3907 int freed; 3908 3909 freed = drain_freelist(searchp, n, (n->free_limit + 3910 5 * searchp->num - 1) / (5 * searchp->num)); 3911 STATS_ADD_REAPED(searchp, freed); 3912 } 3913 next: 3914 cond_resched(); 3915 } 3916 check_irq_on(); 3917 mutex_unlock(&slab_mutex); 3918 next_reap_node(); 3919 out: 3920 /* Set up the next iteration */ 3921 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); 3922 } 3923 3924 #ifdef CONFIG_SLABINFO 3925 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) 3926 { 3927 struct page *page; 3928 unsigned long active_objs; 3929 unsigned long num_objs; 3930 unsigned long active_slabs = 0; 3931 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 3932 const char *name; 3933 char *error = NULL; 3934 int node; 3935 struct kmem_cache_node *n; 3936 3937 active_objs = 0; 3938 num_slabs = 0; 3939 for_each_kmem_cache_node(cachep, node, n) { 3940 3941 check_irq_on(); 3942 spin_lock_irq(&n->list_lock); 3943 3944 list_for_each_entry(page, &n->slabs_full, lru) { 3945 if (page->active != cachep->num && !error) 3946 error = "slabs_full accounting error"; 3947 active_objs += cachep->num; 3948 active_slabs++; 3949 } 3950 list_for_each_entry(page, &n->slabs_partial, lru) { 3951 if (page->active == cachep->num && !error) 3952 error = "slabs_partial accounting error"; 3953 if (!page->active && !error) 3954 error = "slabs_partial accounting error"; 3955 active_objs += page->active; 3956 active_slabs++; 3957 } 3958 list_for_each_entry(page, &n->slabs_free, lru) { 3959 if (page->active && !error) 3960 error = "slabs_free accounting error"; 3961 num_slabs++; 3962 } 3963 free_objects += n->free_objects; 3964 if (n->shared) 3965 shared_avail += n->shared->avail; 3966 3967 spin_unlock_irq(&n->list_lock); 3968 } 3969 num_slabs += active_slabs; 3970 num_objs = num_slabs * cachep->num; 3971 if (num_objs - active_objs != free_objects && !error) 3972 error = "free_objects accounting error"; 3973 3974 name = cachep->name; 3975 if (error) 3976 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 3977 3978 sinfo->active_objs = active_objs; 3979 sinfo->num_objs = num_objs; 3980 sinfo->active_slabs = active_slabs; 3981 sinfo->num_slabs = num_slabs; 3982 sinfo->shared_avail = shared_avail; 3983 sinfo->limit = cachep->limit; 3984 sinfo->batchcount = cachep->batchcount; 3985 sinfo->shared = cachep->shared; 3986 sinfo->objects_per_slab = cachep->num; 3987 sinfo->cache_order = cachep->gfporder; 3988 } 3989 3990 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) 3991 { 3992 #if STATS 3993 { /* node stats */ 3994 unsigned long high = cachep->high_mark; 3995 unsigned long allocs = cachep->num_allocations; 3996 unsigned long grown = cachep->grown; 3997 unsigned long reaped = cachep->reaped; 3998 unsigned long errors = cachep->errors; 3999 unsigned long max_freeable = cachep->max_freeable; 4000 unsigned long node_allocs = cachep->node_allocs; 4001 unsigned long node_frees = cachep->node_frees; 4002 unsigned long overflows = cachep->node_overflow; 4003 4004 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu " 4005 "%4lu %4lu %4lu %4lu %4lu", 4006 allocs, high, grown, 4007 reaped, errors, max_freeable, node_allocs, 4008 node_frees, overflows); 4009 } 4010 /* cpu stats */ 4011 { 4012 unsigned long allochit = 

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer; expected to hold "cache-name limit batchcount shared"
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}

#ifdef CONFIG_DEBUG_SLAB_LEAK

/*
 * The caller table passed to add_caller() lives in the seq_file private
 * buffer: n[0] is the number of entry slots, n[1] the number of slots in
 * use, followed by n[1] (caller address, hit count) pairs kept sorted by
 * address.  add_caller() binary-searches the pairs and either bumps an
 * existing count or shifts the tail up to insert a new pair; it returns 0
 * once the table is full.
 */
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
			struct page *page)
{
	void *p;
	int i;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		if (get_obj_status(page, i) != OBJECT_ACTIVE)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}
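
/*
 * leaks_show() below walks every full and partial slab of a cache, collects
 * the recorded allocation callers of all active objects via handle_slab(),
 * and then emits one line per unique caller in the form
 *
 *   <cache name>: <object count> <symbol>+<offset>/<size> [module]
 *
 * falling back to a raw address when kallsyms cannot resolve the caller, as
 * show_symbol() above does.  If the caller table fills up, the seq_file
 * private buffer is doubled and the record is retried.
 */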

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_kmem_cache_node(cachep, node, n) {

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed for the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
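
/*
 * A minimal usage sketch for ksize() (illustrative only): callers may use
 * the slack that kmalloc() rounds an allocation up to, for example to size
 * a growable buffer:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		capacity = ksize(buf);	(at least len; all of it is usable)
 */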