/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include	<trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};
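
/*
 * Illustrative note (not part of the original comments): because slab
 * objects are at least word aligned, bit 0 of an entry[] pointer is always
 * zero.  The helpers below reuse it as a flag - an object coming from a
 * pfmemalloc slab at, say, address 0x...5678 is stored in entry[] as
 * 0x...5679 and must have the bit cleared before it is dereferenced.
 */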

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_NODE (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->node[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
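/*
 * Illustrative note: when a per-cpu array cache has not been marked
 * "touched" since the last reap, cache_alloc_refill() caps a single refill
 * at BATCHREFILL_LIMIT entries instead of the full batchcount, so an idle
 * cache is not bulk-refilled.
 */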
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
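
/*
 * Illustrative example (values chosen for exposition): in a cache with
 * size == 256, reciprocal_buffer_size caches the precomputed reciprocal of
 * 256, so for an object at page->s_mem + 768 the reciprocal_divide() above
 * yields 768 / 256 == 2, and index_to_obj(cache, page, 2) maps back to the
 * same address - the two helpers are inverses of each other.
 */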

static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static struct lock_class_key debugobj_l3_key;
static struct lock_class_key debugobj_alc_key;

static void slab_set_lock_classes(struct kmem_cache *cachep,
		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
		int q)
{
	struct array_cache **alc;
	struct kmem_cache_node *n;
	int r;

	n = cachep->node[q];
	if (!n)
		return;

	lockdep_set_class(&n->list_lock, l3_key);
	alc = n->alien;
	/*
	 * FIXME: This check for BAD_ALIEN_MAGIC
	 * should go away when common slab code is taught to
	 * work even without alien caches.
	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
	 * for alloc_alien_cache,
	 */
	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
		return;
	for_each_node(r) {
		if (alc[r])
			lockdep_set_class(&alc[r]->lock, alc_key);
	}
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
	int node;

	for_each_online_node(node)
		slab_set_debugobj_lock_classes_node(cachep, node);
}

static void init_node_lock_keys(int q)
{
	int i;

	if (slab_state < UP)
		return;

	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache_node *n;
		struct kmem_cache *cache = kmalloc_caches[i];

		if (!cache)
			continue;

		n = cache->node[q];
		if (!n || OFF_SLAB(cache))
			continue;

		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
}

static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
{
	if (!cachep->node[q])
		return;

	slab_set_lock_classes(cachep, &on_slab_l3_key,
			&on_slab_alc_key, q);
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
	int node;

	VM_BUG_ON(OFF_SLAB(cachep));
	for_each_node(node)
		on_slab_lock_classes_node(cachep, node);
}

static inline void init_lock_keys(void)
{
	int node;

	for_each_node(node)
		init_node_lock_keys(node);
}
#else
static void init_node_lock_keys(int q)
{
}

static inline void init_lock_keys(void)
{
}

static inline void on_slab_lock_classes(struct kmem_cache *cachep)
{
}

static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
{
}

static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
{
}
#endif

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t freelist_size;

	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	freelist_size = slab_size - nr_objs * buffer_size;
	if (freelist_size < ALIGN(nr_objs * idx_size, align))
		nr_objs--;

	return nr_objs;
}
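
/*
 * Illustrative example (hypothetical numbers): for a 4096-byte slab with
 * buffer_size == 256, idx_size == sizeof(freelist_idx_t) == 2 and
 * align == 8, the initial guess is 4096 / 258 == 15 objects.  The check
 * then verifies that the 4096 - 15 * 256 == 256 remaining bytes can hold
 * the ALIGN(15 * 2, 8) == 32-byte freelist, so 15 is kept.
 */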

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
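
/*
 * Illustrative usage: booting with "slab_max_order=2" lets a slab span up
 * to four contiguous pages; a negative value is clamped to 0 and anything
 * above MAX_ORDER - 1 is capped by the min() above.
 */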

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, gfp, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = cachep->node[numa_mem_id()];
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i))
				continue;
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_cache_node *n = cachep->node[node];

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct array_cache *ac = n->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int nodeid = page_to_nid(virt_to_page(objp));
	struct kmem_cache_node *n;
	struct array_cache *alien = NULL;
	int node;

	node = numa_mem_id();

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(nodeid == node))
		return 0;

	n = cachep->node[node];
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[nodeid]) {
		alien = n->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		ac_put_obj(cachep, alien, objp);
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->node[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->node[nodeid])->list_lock);
	}
	return 1;
}
#endif

/*
 * Allocates and initializes the kmem_cache_node for a node on each slab
 * cache, used for either memory or cpu hotplug.  If memory is being
 * hot-added, the kmem_cache_node will be allocated off-node since memory
 * is not yet online for the new node.  When hotplugging memory or a cpu,
 * existing nodes are not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const int memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->node[node]) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&cachep->node[node]->list_lock);
		cachep->node[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->node[node]->list_lock);
	}
	return 0;
}

static inline int slabs_tofree(struct kmem_cache *cachep,
						struct kmem_cache_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		n = cachep->node[node];

		if (!n)
			goto free_array_cache;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_array_cache;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = cachep->node[node];
		if (!n)
			continue;
		drain_freelist(cachep, n, slabs_tofree(cachep, n));
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount, GFP_KERNEL);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d, GFP_KERNEL);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		n = cachep->node[node];
		BUG_ON(!n);

		spin_lock_irq(&n->list_lock);
		if (!n->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			n->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!n->alien) {
			n->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&n->list_lock);
		kfree(shared);
		free_alien_cache(alien);
		if (cachep->flags & SLAB_DEBUG_OBJECTS)
			slab_set_debugobj_lock_classes_node(cachep, node);
		else if (!OFF_SLAB(cachep) &&
			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
			on_slab_lock_classes_node(cachep, node);
	}
	init_node_lock_keys(node);

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&slab_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&slab_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the slab_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(slab_reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_cache_node of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The node
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&slab_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&slab_mutex);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drains freelist for a node on each slab cache, used for memory hot-remove.
 * Returns -EBUSY if all objects cannot be drained so that the node is not
 * removed.
 *
 * Must hold slab_mutex.
 */
static int __meminit drain_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	int ret = 0;

	list_for_each_entry(cachep, &slab_caches, list) {
		struct kmem_cache_node *n;

		n = cachep->node[node];
		if (!n)
			continue;

		drain_freelist(cachep, n, slabs_tofree(cachep, n));

		if (!list_empty(&n->slabs_full) ||
		    !list_empty(&n->slabs_partial)) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static int __meminit slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int ret = 0;
	int nid;

	nid = mnb->status_change_nid;
	if (nid < 0)
		goto out;

	switch (action) {
	case MEM_GOING_ONLINE:
		mutex_lock(&slab_mutex);
		ret = init_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_GOING_OFFLINE:
		mutex_lock(&slab_mutex);
		ret = drain_cache_node_node(nid);
		mutex_unlock(&slab_mutex);
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
out:
	return notifier_from_errno(ret);
}
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */

/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_node for a cache whose buffer_size is
 * the same as the size of kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * The memory after the last cpu cache pointer is used for the
 * node pointer.
 */
static void setup_node_pointer(struct kmem_cache *cachep)
{
	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
}
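
/*
 * Illustrative note on the layout assumed above: a boot-time kmem_cache is
 * laid out as
 *
 *	[ fixed fields | array[0..nr_cpu_ids-1] | node[0..nr_node_ids-1] ]
 *
 * which is why kmem_cache_init() below sizes it as
 * offsetof(struct kmem_cache, array[nr_cpu_ids]) plus
 * nr_node_ids * sizeof(struct kmem_cache_node *), and why
 * setup_node_pointer() can simply point cachep->node just past the
 * per-cpu array.
 */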

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;
	setup_node_pointer(kmem_cache);

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	set_up_node(kmem_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);

	/* 2+3) create the kmalloc caches */

	/*
	 * Initialize the caches that provide memory for the array cache and the
	 * kmem_cache_node structures first.  Without this, further allocations
	 * will bug.
	 */

	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);

	if (INDEX_AC != INDEX_NODE)
		kmalloc_caches[INDEX_NODE] =
			create_kmalloc_cache("kmalloc-node",
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);

	slab_early_init = 0;

	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		memcpy(ptr, cpu_cache_get(kmem_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmem_cache->array[smp_processor_id()] = ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);

		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
	}
	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_AC],
				  &init_kmem_cache_node[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_NODE) {
				init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
			}
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();

	/* Done! */
	slab_state = FULL;

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * node.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
	struct kmem_cache_node *n;
	struct page *page;
	unsigned long flags;
	int node;

	printk(KERN_WARNING
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_online_node(node) {
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
		unsigned long active_slabs = 0, num_slabs = 0;

		n = cachep->node[node];
		if (!n)
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->slabs_full, lru) {
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru)
			num_slabs++;

		free_objects += n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		num_slabs += active_slabs;
		num_objs = num_slabs * cachep->num;
		printk(KERN_WARNING
			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
			node, active_slabs, num_slabs, active_objs, num_objs,
			free_objects);
	}
}

/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
			slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (unlikely(page->pfmemalloc))
		pfmemalloc_active = true;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);
	memcg_bind_pages(cachep, cachep->gfporder);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	const unsigned long nr_freed = (1 << cachep->gfporder);

	kmemcheck_free_shadow(page, cachep->gfporder);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	memcg_release_pages(cachep, cachep->gfporder);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	__free_memcg_kmem_pages(page, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption (%s): %s start=%p, len=%d\n",
					print_tainted(), cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, 1958 struct page *page) 1959 { 1960 } 1961 #endif 1962 1963 /** 1964 * slab_destroy - destroy and release all objects in a slab 1965 * @cachep: cache pointer being destroyed 1966 * @page: page pointer being destroyed 1967 * 1968 * Destroy all the objs in a slab, and release the mem back to the system. 1969 * Before calling the slab must have been unlinked from the cache. The 1970 * cache-lock is not held/needed. 1971 */ 1972 static void slab_destroy(struct kmem_cache *cachep, struct page *page) 1973 { 1974 void *freelist; 1975 1976 freelist = page->freelist; 1977 slab_destroy_debugcheck(cachep, page); 1978 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1979 struct rcu_head *head; 1980 1981 /* 1982 * RCU free overloads the RCU head over the LRU. 1983 * slab_page has been overloeaded over the LRU, 1984 * however it is not used from now on so that 1985 * we can use it safely. 1986 */ 1987 head = (void *)&page->rcu_head; 1988 call_rcu(head, kmem_rcu_free); 1989 1990 } else { 1991 kmem_freepages(cachep, page); 1992 } 1993 1994 /* 1995 * From now on, we don't use freelist 1996 * although actual page can be freed in rcu context 1997 */ 1998 if (OFF_SLAB(cachep)) 1999 kmem_cache_free(cachep->freelist_cache, freelist); 2000 } 2001 2002 /** 2003 * calculate_slab_order - calculate size (page order) of slabs 2004 * @cachep: pointer to the cache that is being created 2005 * @size: size of objects to be created in this cache. 2006 * @align: required alignment for the objects. 2007 * @flags: slab allocation flags 2008 * 2009 * Also calculates the number of objects per slab. 2010 * 2011 * This could be made much more intelligent. For now, try to avoid using 2012 * high order pages for slabs. When the gfp() functions are more friendly 2013 * towards high-order requests, this should be changed. 2014 */ 2015 static size_t calculate_slab_order(struct kmem_cache *cachep, 2016 size_t size, size_t align, unsigned long flags) 2017 { 2018 unsigned long offslab_limit; 2019 size_t left_over = 0; 2020 int gfporder; 2021 2022 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 2023 unsigned int num; 2024 size_t remainder; 2025 2026 cache_estimate(gfporder, size, align, flags, &remainder, &num); 2027 if (!num) 2028 continue; 2029 2030 /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ 2031 if (num > SLAB_OBJ_MAX_NUM) 2032 break; 2033 2034 if (flags & CFLGS_OFF_SLAB) { 2035 /* 2036 * Max number of objs-per-slab for caches which 2037 * use off-slab slabs. Needed to avoid a possible 2038 * looping condition in cache_grow(). 2039 */ 2040 offslab_limit = size; 2041 offslab_limit /= sizeof(freelist_idx_t); 2042 2043 if (num > offslab_limit) 2044 break; 2045 } 2046 2047 /* Found something acceptable - save it away */ 2048 cachep->num = num; 2049 cachep->gfporder = gfporder; 2050 left_over = remainder; 2051 2052 /* 2053 * A VFS-reclaimable slab tends to have most allocations 2054 * as GFP_NOFS and we really don't want to have to be allocating 2055 * higher-order pages when we are unable to shrink dcache. 2056 */ 2057 if (flags & SLAB_RECLAIM_ACCOUNT) 2058 break; 2059 2060 /* 2061 * Large number of objects is good, but very large slabs are 2062 * currently bad for the gfp()s. 2063 */ 2064 if (gfporder >= slab_max_order) 2065 break; 2066 2067 /* 2068 * Acceptable internal fragmentation? 
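 * Accept this order if the wasted bytes amount to at most 1/8th of
 * the slab; otherwise keep trying higher orders.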
2069 */ 2070 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2071 break; 2072 } 2073 return left_over; 2074 } 2075 2076 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) 2077 { 2078 if (slab_state >= FULL) 2079 return enable_cpucache(cachep, gfp); 2080 2081 if (slab_state == DOWN) { 2082 /* 2083 * Note: Creation of first cache (kmem_cache). 2084 * The setup_node is taken care 2085 * of by the caller of __kmem_cache_create 2086 */ 2087 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2088 slab_state = PARTIAL; 2089 } else if (slab_state == PARTIAL) { 2090 /* 2091 * Note: the second kmem_cache_create must create the cache 2092 * that's used by kmalloc(24), otherwise the creation of 2093 * further caches will BUG(). 2094 */ 2095 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2096 2097 /* 2098 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is 2099 * the second cache, then we need to set up all its node/, 2100 * otherwise the creation of further caches will BUG(). 2101 */ 2102 set_up_node(cachep, SIZE_AC); 2103 if (INDEX_AC == INDEX_NODE) 2104 slab_state = PARTIAL_NODE; 2105 else 2106 slab_state = PARTIAL_ARRAYCACHE; 2107 } else { 2108 /* Remaining boot caches */ 2109 cachep->array[smp_processor_id()] = 2110 kmalloc(sizeof(struct arraycache_init), gfp); 2111 2112 if (slab_state == PARTIAL_ARRAYCACHE) { 2113 set_up_node(cachep, SIZE_NODE); 2114 slab_state = PARTIAL_NODE; 2115 } else { 2116 int node; 2117 for_each_online_node(node) { 2118 cachep->node[node] = 2119 kmalloc_node(sizeof(struct kmem_cache_node), 2120 gfp, node); 2121 BUG_ON(!cachep->node[node]); 2122 kmem_cache_node_init(cachep->node[node]); 2123 } 2124 } 2125 } 2126 cachep->node[numa_mem_id()]->next_reap = 2127 jiffies + REAPTIMEOUT_NODE + 2128 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 2129 2130 cpu_cache_get(cachep)->avail = 0; 2131 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2132 cpu_cache_get(cachep)->batchcount = 1; 2133 cpu_cache_get(cachep)->touched = 0; 2134 cachep->batchcount = 1; 2135 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2136 return 0; 2137 } 2138 2139 /** 2140 * __kmem_cache_create - Create a cache. 2141 * @cachep: cache management descriptor 2142 * @flags: SLAB flags 2143 * 2144 * Returns a ptr to the cache on success, NULL on failure. 2145 * Cannot be called within a int, but can be interrupted. 2146 * The @ctor is run when new pages are allocated by the cache. 2147 * 2148 * The flags are 2149 * 2150 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2151 * to catch references to uninitialised memory. 2152 * 2153 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2154 * for buffer overruns. 2155 * 2156 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2157 * cacheline. This can be beneficial if you're counting cycles as closely 2158 * as davem. 2159 */ 2160 int 2161 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) 2162 { 2163 size_t left_over, freelist_size, ralign; 2164 gfp_t gfp; 2165 int err; 2166 size_t size = cachep->size; 2167 2168 #if DEBUG 2169 #if FORCED_DEBUG 2170 /* 2171 * Enable redzoning and last user accounting, except for caches with 2172 * large objects, if the increased size would increase the object size 2173 * above the next power of two: caches with object sizes just above a 2174 * power of two have a significant amount of internal fragmentation. 
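 * The fls() comparison below implements that check: debugging is only
 * added when the extra red-zone and user words keep the object within
 * the same power-of-two size class.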
2175 */ 2176 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2177 2 * sizeof(unsigned long long))) 2178 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2179 if (!(flags & SLAB_DESTROY_BY_RCU)) 2180 flags |= SLAB_POISON; 2181 #endif 2182 if (flags & SLAB_DESTROY_BY_RCU) 2183 BUG_ON(flags & SLAB_POISON); 2184 #endif 2185 2186 /* 2187 * Check that size is in terms of words. This is needed to avoid 2188 * unaligned accesses for some archs when redzoning is used, and makes 2189 * sure any on-slab bufctl's are also correctly aligned. 2190 */ 2191 if (size & (BYTES_PER_WORD - 1)) { 2192 size += (BYTES_PER_WORD - 1); 2193 size &= ~(BYTES_PER_WORD - 1); 2194 } 2195 2196 /* 2197 * Redzoning and user store require word alignment or possibly larger. 2198 * Note this will be overridden by architecture or caller mandated 2199 * alignment if either is greater than BYTES_PER_WORD. 2200 */ 2201 if (flags & SLAB_STORE_USER) 2202 ralign = BYTES_PER_WORD; 2203 2204 if (flags & SLAB_RED_ZONE) { 2205 ralign = REDZONE_ALIGN; 2206 /* If redzoning, ensure that the second redzone is suitably 2207 * aligned, by adjusting the object size accordingly. */ 2208 size += REDZONE_ALIGN - 1; 2209 size &= ~(REDZONE_ALIGN - 1); 2210 } 2211 2212 /* 3) caller mandated alignment */ 2213 if (ralign < cachep->align) { 2214 ralign = cachep->align; 2215 } 2216 /* disable debug if necessary */ 2217 if (ralign > __alignof__(unsigned long long)) 2218 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2219 /* 2220 * 4) Store it. 2221 */ 2222 cachep->align = ralign; 2223 2224 if (slab_is_available()) 2225 gfp = GFP_KERNEL; 2226 else 2227 gfp = GFP_NOWAIT; 2228 2229 setup_node_pointer(cachep); 2230 #if DEBUG 2231 2232 /* 2233 * Both debugging options require word-alignment which is calculated 2234 * into align above. 2235 */ 2236 if (flags & SLAB_RED_ZONE) { 2237 /* add space for red zone words */ 2238 cachep->obj_offset += sizeof(unsigned long long); 2239 size += 2 * sizeof(unsigned long long); 2240 } 2241 if (flags & SLAB_STORE_USER) { 2242 /* user store requires one word storage behind the end of 2243 * the real object. But if the second red zone needs to be 2244 * aligned to 64 bits, we must allow that much space. 2245 */ 2246 if (flags & SLAB_RED_ZONE) 2247 size += REDZONE_ALIGN; 2248 else 2249 size += BYTES_PER_WORD; 2250 } 2251 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2252 if (size >= kmalloc_size(INDEX_NODE + 1) 2253 && cachep->object_size > cache_line_size() 2254 && ALIGN(size, cachep->align) < PAGE_SIZE) { 2255 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2256 size = PAGE_SIZE; 2257 } 2258 #endif 2259 #endif 2260 2261 /* 2262 * Determine if the slab management is 'on' or 'off' slab. 2263 * (bootstrapping cannot cope with offslab caches so don't do 2264 * it too early on. Always use on-slab management when 2265 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak) 2266 */ 2267 if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init && 2268 !(flags & SLAB_NOLEAKTRACE)) 2269 /* 2270 * Size is large, assume best to place the slab management obj 2271 * off-slab (should allow better packing of objs). 2272 */ 2273 flags |= CFLGS_OFF_SLAB; 2274 2275 size = ALIGN(size, cachep->align); 2276 /* 2277 * We should restrict the number of objects in a slab to implement 2278 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. 
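 * Rounding tiny objects up to SLAB_OBJ_MIN_SIZE keeps the number of
 * objects per slab within what a one-byte freelist index can address.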
2279 */ 2280 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) 2281 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); 2282 2283 left_over = calculate_slab_order(cachep, size, cachep->align, flags); 2284 2285 if (!cachep->num) 2286 return -E2BIG; 2287 2288 freelist_size = 2289 ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align); 2290 2291 /* 2292 * If the slab has been placed off-slab, and we have enough space then 2293 * move it on-slab. This is at the expense of any extra colouring. 2294 */ 2295 if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { 2296 flags &= ~CFLGS_OFF_SLAB; 2297 left_over -= freelist_size; 2298 } 2299 2300 if (flags & CFLGS_OFF_SLAB) { 2301 /* really off slab. No need for manual alignment */ 2302 freelist_size = cachep->num * sizeof(freelist_idx_t); 2303 2304 #ifdef CONFIG_PAGE_POISONING 2305 /* If we're going to use the generic kernel_map_pages() 2306 * poisoning, then it's going to smash the contents of 2307 * the redzone and userword anyhow, so switch them off. 2308 */ 2309 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON) 2310 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2311 #endif 2312 } 2313 2314 cachep->colour_off = cache_line_size(); 2315 /* Offset must be a multiple of the alignment. */ 2316 if (cachep->colour_off < cachep->align) 2317 cachep->colour_off = cachep->align; 2318 cachep->colour = left_over / cachep->colour_off; 2319 cachep->freelist_size = freelist_size; 2320 cachep->flags = flags; 2321 cachep->allocflags = __GFP_COMP; 2322 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2323 cachep->allocflags |= GFP_DMA; 2324 cachep->size = size; 2325 cachep->reciprocal_buffer_size = reciprocal_value(size); 2326 2327 if (flags & CFLGS_OFF_SLAB) { 2328 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); 2329 /* 2330 * This is a possibility for one of the kmalloc_{dma,}_caches. 2331 * But since we go off slab only for object size greater than 2332 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created 2333 * in ascending order,this should not happen at all. 2334 * But leave a BUG_ON for some lucky dude. 2335 */ 2336 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); 2337 } 2338 2339 err = setup_cpu_cache(cachep, gfp); 2340 if (err) { 2341 __kmem_cache_shutdown(cachep); 2342 return err; 2343 } 2344 2345 if (flags & SLAB_DEBUG_OBJECTS) { 2346 /* 2347 * Would deadlock through slab_destroy()->call_rcu()-> 2348 * debug_object_activate()->kmem_cache_alloc(). 
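 * Hence SLAB_DESTROY_BY_RCU and SLAB_DEBUG_OBJECTS must not be
 * combined on one cache; the WARN_ON_ONCE below catches callers that
 * try.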
2349 */ 2350 WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); 2351 2352 slab_set_debugobj_lock_classes(cachep); 2353 } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU)) 2354 on_slab_lock_classes(cachep); 2355 2356 return 0; 2357 } 2358 2359 #if DEBUG 2360 static void check_irq_off(void) 2361 { 2362 BUG_ON(!irqs_disabled()); 2363 } 2364 2365 static void check_irq_on(void) 2366 { 2367 BUG_ON(irqs_disabled()); 2368 } 2369 2370 static void check_spinlock_acquired(struct kmem_cache *cachep) 2371 { 2372 #ifdef CONFIG_SMP 2373 check_irq_off(); 2374 assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock); 2375 #endif 2376 } 2377 2378 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2379 { 2380 #ifdef CONFIG_SMP 2381 check_irq_off(); 2382 assert_spin_locked(&cachep->node[node]->list_lock); 2383 #endif 2384 } 2385 2386 #else 2387 #define check_irq_off() do { } while(0) 2388 #define check_irq_on() do { } while(0) 2389 #define check_spinlock_acquired(x) do { } while(0) 2390 #define check_spinlock_acquired_node(x, y) do { } while(0) 2391 #endif 2392 2393 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, 2394 struct array_cache *ac, 2395 int force, int node); 2396 2397 static void do_drain(void *arg) 2398 { 2399 struct kmem_cache *cachep = arg; 2400 struct array_cache *ac; 2401 int node = numa_mem_id(); 2402 2403 check_irq_off(); 2404 ac = cpu_cache_get(cachep); 2405 spin_lock(&cachep->node[node]->list_lock); 2406 free_block(cachep, ac->entry, ac->avail, node); 2407 spin_unlock(&cachep->node[node]->list_lock); 2408 ac->avail = 0; 2409 } 2410 2411 static void drain_cpu_caches(struct kmem_cache *cachep) 2412 { 2413 struct kmem_cache_node *n; 2414 int node; 2415 2416 on_each_cpu(do_drain, cachep, 1); 2417 check_irq_on(); 2418 for_each_online_node(node) { 2419 n = cachep->node[node]; 2420 if (n && n->alien) 2421 drain_alien_cache(cachep, n->alien); 2422 } 2423 2424 for_each_online_node(node) { 2425 n = cachep->node[node]; 2426 if (n) 2427 drain_array(cachep, n, n->shared, 1, node); 2428 } 2429 } 2430 2431 /* 2432 * Remove slabs from the list of free slabs. 2433 * Specify the number of slabs to drain in tofree. 2434 * 2435 * Returns the actual number of slabs released. 2436 */ 2437 static int drain_freelist(struct kmem_cache *cache, 2438 struct kmem_cache_node *n, int tofree) 2439 { 2440 struct list_head *p; 2441 int nr_freed; 2442 struct page *page; 2443 2444 nr_freed = 0; 2445 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { 2446 2447 spin_lock_irq(&n->list_lock); 2448 p = n->slabs_free.prev; 2449 if (p == &n->slabs_free) { 2450 spin_unlock_irq(&n->list_lock); 2451 goto out; 2452 } 2453 2454 page = list_entry(p, struct page, lru); 2455 #if DEBUG 2456 BUG_ON(page->active); 2457 #endif 2458 list_del(&page->lru); 2459 /* 2460 * Safe to drop the lock. The slab is no longer linked 2461 * to the cache. 
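 * It was removed from slabs_free above, so no other CPU can reach it
 * and slab_destroy() can run without the list_lock held.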
2462 */ 2463 n->free_objects -= cache->num; 2464 spin_unlock_irq(&n->list_lock); 2465 slab_destroy(cache, page); 2466 nr_freed++; 2467 } 2468 out: 2469 return nr_freed; 2470 } 2471 2472 /* Called with slab_mutex held to protect against cpu hotplug */ 2473 static int __cache_shrink(struct kmem_cache *cachep) 2474 { 2475 int ret = 0, i = 0; 2476 struct kmem_cache_node *n; 2477 2478 drain_cpu_caches(cachep); 2479 2480 check_irq_on(); 2481 for_each_online_node(i) { 2482 n = cachep->node[i]; 2483 if (!n) 2484 continue; 2485 2486 drain_freelist(cachep, n, slabs_tofree(cachep, n)); 2487 2488 ret += !list_empty(&n->slabs_full) || 2489 !list_empty(&n->slabs_partial); 2490 } 2491 return (ret ? 1 : 0); 2492 } 2493 2494 /** 2495 * kmem_cache_shrink - Shrink a cache. 2496 * @cachep: The cache to shrink. 2497 * 2498 * Releases as many slabs as possible for a cache. 2499 * To help debugging, a zero exit status indicates all slabs were released. 2500 */ 2501 int kmem_cache_shrink(struct kmem_cache *cachep) 2502 { 2503 int ret; 2504 BUG_ON(!cachep || in_interrupt()); 2505 2506 get_online_cpus(); 2507 mutex_lock(&slab_mutex); 2508 ret = __cache_shrink(cachep); 2509 mutex_unlock(&slab_mutex); 2510 put_online_cpus(); 2511 return ret; 2512 } 2513 EXPORT_SYMBOL(kmem_cache_shrink); 2514 2515 int __kmem_cache_shutdown(struct kmem_cache *cachep) 2516 { 2517 int i; 2518 struct kmem_cache_node *n; 2519 int rc = __cache_shrink(cachep); 2520 2521 if (rc) 2522 return rc; 2523 2524 for_each_online_cpu(i) 2525 kfree(cachep->array[i]); 2526 2527 /* NUMA: free the node structures */ 2528 for_each_online_node(i) { 2529 n = cachep->node[i]; 2530 if (n) { 2531 kfree(n->shared); 2532 free_alien_cache(n->alien); 2533 kfree(n); 2534 } 2535 } 2536 return 0; 2537 } 2538 2539 /* 2540 * Get the memory for a slab management obj. 2541 * 2542 * For a slab cache when the slab descriptor is off-slab, the 2543 * slab descriptor can't come from the same cache which is being created, 2544 * Because if it is the case, that means we defer the creation of 2545 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point. 2546 * And we eventually call down to __kmem_cache_create(), which 2547 * in turn looks up in the kmalloc_{dma,}_caches for the disired-size one. 2548 * This is a "chicken-and-egg" problem. 2549 * 2550 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches, 2551 * which are all initialized during kmem_cache_init(). 2552 */ 2553 static void *alloc_slabmgmt(struct kmem_cache *cachep, 2554 struct page *page, int colour_off, 2555 gfp_t local_flags, int nodeid) 2556 { 2557 void *freelist; 2558 void *addr = page_address(page); 2559 2560 if (OFF_SLAB(cachep)) { 2561 /* Slab management obj is off-slab. 
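 * The freelist comes from the separate freelist_cache instead of
 * being carved out of the slab's own pages.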
*/ 2562 freelist = kmem_cache_alloc_node(cachep->freelist_cache, 2563 local_flags, nodeid); 2564 if (!freelist) 2565 return NULL; 2566 } else { 2567 freelist = addr + colour_off; 2568 colour_off += cachep->freelist_size; 2569 } 2570 page->active = 0; 2571 page->s_mem = addr + colour_off; 2572 return freelist; 2573 } 2574 2575 static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx) 2576 { 2577 return ((freelist_idx_t *)page->freelist)[idx]; 2578 } 2579 2580 static inline void set_free_obj(struct page *page, 2581 unsigned char idx, freelist_idx_t val) 2582 { 2583 ((freelist_idx_t *)(page->freelist))[idx] = val; 2584 } 2585 2586 static void cache_init_objs(struct kmem_cache *cachep, 2587 struct page *page) 2588 { 2589 int i; 2590 2591 for (i = 0; i < cachep->num; i++) { 2592 void *objp = index_to_obj(cachep, page, i); 2593 #if DEBUG 2594 /* need to poison the objs? */ 2595 if (cachep->flags & SLAB_POISON) 2596 poison_obj(cachep, objp, POISON_FREE); 2597 if (cachep->flags & SLAB_STORE_USER) 2598 *dbg_userword(cachep, objp) = NULL; 2599 2600 if (cachep->flags & SLAB_RED_ZONE) { 2601 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2602 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2603 } 2604 /* 2605 * Constructors are not allowed to allocate memory from the same 2606 * cache which they are a constructor for. Otherwise, deadlock. 2607 * They must also be threaded. 2608 */ 2609 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2610 cachep->ctor(objp + obj_offset(cachep)); 2611 2612 if (cachep->flags & SLAB_RED_ZONE) { 2613 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2614 slab_error(cachep, "constructor overwrote the" 2615 " end of an object"); 2616 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2617 slab_error(cachep, "constructor overwrote the" 2618 " start of an object"); 2619 } 2620 if ((cachep->size % PAGE_SIZE) == 0 && 2621 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2622 kernel_map_pages(virt_to_page(objp), 2623 cachep->size / PAGE_SIZE, 0); 2624 #else 2625 if (cachep->ctor) 2626 cachep->ctor(objp); 2627 #endif 2628 set_free_obj(page, i, i); 2629 } 2630 } 2631 2632 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2633 { 2634 if (CONFIG_ZONE_DMA_FLAG) { 2635 if (flags & GFP_DMA) 2636 BUG_ON(!(cachep->allocflags & GFP_DMA)); 2637 else 2638 BUG_ON(cachep->allocflags & GFP_DMA); 2639 } 2640 } 2641 2642 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, 2643 int nodeid) 2644 { 2645 void *objp; 2646 2647 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); 2648 page->active++; 2649 #if DEBUG 2650 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); 2651 #endif 2652 2653 return objp; 2654 } 2655 2656 static void slab_put_obj(struct kmem_cache *cachep, struct page *page, 2657 void *objp, int nodeid) 2658 { 2659 unsigned int objnr = obj_to_index(cachep, page, objp); 2660 #if DEBUG 2661 unsigned int i; 2662 2663 /* Verify that the slab belongs to the intended node */ 2664 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); 2665 2666 /* Verify double free bug */ 2667 for (i = page->active; i < cachep->num; i++) { 2668 if (get_free_obj(page, i) == objnr) { 2669 printk(KERN_ERR "slab: double free detected in cache " 2670 "'%s', objp %p\n", cachep->name, objp); 2671 BUG(); 2672 } 2673 } 2674 #endif 2675 page->active--; 2676 set_free_obj(page, page->active, objnr); 2677 } 2678 2679 /* 2680 * Map pages beginning at addr to the given cache and slab. 
This is required 2681 * for the slab allocator to be able to lookup the cache and slab of a 2682 * virtual address for kfree, ksize, and slab debugging. 2683 */ 2684 static void slab_map_pages(struct kmem_cache *cache, struct page *page, 2685 void *freelist) 2686 { 2687 page->slab_cache = cache; 2688 page->freelist = freelist; 2689 } 2690 2691 /* 2692 * Grow (by 1) the number of slabs within a cache. This is called by 2693 * kmem_cache_alloc() when there are no active objs left in a cache. 2694 */ 2695 static int cache_grow(struct kmem_cache *cachep, 2696 gfp_t flags, int nodeid, struct page *page) 2697 { 2698 void *freelist; 2699 size_t offset; 2700 gfp_t local_flags; 2701 struct kmem_cache_node *n; 2702 2703 /* 2704 * Be lazy and only check for valid flags here, keeping it out of the 2705 * critical path in kmem_cache_alloc(). 2706 */ 2707 BUG_ON(flags & GFP_SLAB_BUG_MASK); 2708 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2709 2710 /* Take the node list lock to change the colour_next on this node */ 2711 check_irq_off(); 2712 n = cachep->node[nodeid]; 2713 spin_lock(&n->list_lock); 2714 2715 /* Get colour for the slab, and cal the next value. */ 2716 offset = n->colour_next; 2717 n->colour_next++; 2718 if (n->colour_next >= cachep->colour) 2719 n->colour_next = 0; 2720 spin_unlock(&n->list_lock); 2721 2722 offset *= cachep->colour_off; 2723 2724 if (local_flags & __GFP_WAIT) 2725 local_irq_enable(); 2726 2727 /* 2728 * The test for missing atomic flag is performed here, rather than 2729 * the more obvious place, simply to reduce the critical path length 2730 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2731 * will eventually be caught here (where it matters). 2732 */ 2733 kmem_flagcheck(cachep, flags); 2734 2735 /* 2736 * Get mem for the objs. Attempt to allocate a physical page from 2737 * 'nodeid'. 2738 */ 2739 if (!page) 2740 page = kmem_getpages(cachep, local_flags, nodeid); 2741 if (!page) 2742 goto failed; 2743 2744 /* Get slab management. */ 2745 freelist = alloc_slabmgmt(cachep, page, offset, 2746 local_flags & ~GFP_CONSTRAINT_MASK, nodeid); 2747 if (!freelist) 2748 goto opps1; 2749 2750 slab_map_pages(cachep, page, freelist); 2751 2752 cache_init_objs(cachep, page); 2753 2754 if (local_flags & __GFP_WAIT) 2755 local_irq_disable(); 2756 check_irq_off(); 2757 spin_lock(&n->list_lock); 2758 2759 /* Make slab active. */ 2760 list_add_tail(&page->lru, &(n->slabs_free)); 2761 STATS_INC_GROWN(cachep); 2762 n->free_objects += cachep->num; 2763 spin_unlock(&n->list_lock); 2764 return 1; 2765 opps1: 2766 kmem_freepages(cachep, page); 2767 failed: 2768 if (local_flags & __GFP_WAIT) 2769 local_irq_disable(); 2770 return 0; 2771 } 2772 2773 #if DEBUG 2774 2775 /* 2776 * Perform extra freeing checks: 2777 * - detect bad pointers. 2778 * - POISON/RED_ZONE checking 2779 */ 2780 static void kfree_debugcheck(const void *objp) 2781 { 2782 if (!virt_addr_valid(objp)) { 2783 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2784 (unsigned long)objp); 2785 BUG(); 2786 } 2787 } 2788 2789 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2790 { 2791 unsigned long long redzone1, redzone2; 2792 2793 redzone1 = *dbg_redzone1(cache, obj); 2794 redzone2 = *dbg_redzone2(cache, obj); 2795 2796 /* 2797 * Redzone is ok. 
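 * Both words still hold RED_ACTIVE: the object is live and neither
 * end has been overwritten.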
2798 */ 2799 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2800 return; 2801 2802 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2803 slab_error(cache, "double free detected"); 2804 else 2805 slab_error(cache, "memory outside object was overwritten"); 2806 2807 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2808 obj, redzone1, redzone2); 2809 } 2810 2811 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2812 unsigned long caller) 2813 { 2814 unsigned int objnr; 2815 struct page *page; 2816 2817 BUG_ON(virt_to_cache(objp) != cachep); 2818 2819 objp -= obj_offset(cachep); 2820 kfree_debugcheck(objp); 2821 page = virt_to_head_page(objp); 2822 2823 if (cachep->flags & SLAB_RED_ZONE) { 2824 verify_redzone_free(cachep, objp); 2825 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2826 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2827 } 2828 if (cachep->flags & SLAB_STORE_USER) 2829 *dbg_userword(cachep, objp) = (void *)caller; 2830 2831 objnr = obj_to_index(cachep, page, objp); 2832 2833 BUG_ON(objnr >= cachep->num); 2834 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2835 2836 if (cachep->flags & SLAB_POISON) { 2837 #ifdef CONFIG_DEBUG_PAGEALLOC 2838 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2839 store_stackinfo(cachep, objp, caller); 2840 kernel_map_pages(virt_to_page(objp), 2841 cachep->size / PAGE_SIZE, 0); 2842 } else { 2843 poison_obj(cachep, objp, POISON_FREE); 2844 } 2845 #else 2846 poison_obj(cachep, objp, POISON_FREE); 2847 #endif 2848 } 2849 return objp; 2850 } 2851 2852 #else 2853 #define kfree_debugcheck(x) do { } while(0) 2854 #define cache_free_debugcheck(x,objp,z) (objp) 2855 #endif 2856 2857 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, 2858 bool force_refill) 2859 { 2860 int batchcount; 2861 struct kmem_cache_node *n; 2862 struct array_cache *ac; 2863 int node; 2864 2865 check_irq_off(); 2866 node = numa_mem_id(); 2867 if (unlikely(force_refill)) 2868 goto force_grow; 2869 retry: 2870 ac = cpu_cache_get(cachep); 2871 batchcount = ac->batchcount; 2872 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2873 /* 2874 * If there was little recent activity on this cache, then 2875 * perform only a partial refill. Otherwise we could generate 2876 * refill bouncing. 2877 */ 2878 batchcount = BATCHREFILL_LIMIT; 2879 } 2880 n = cachep->node[node]; 2881 2882 BUG_ON(ac->avail > 0 || !n); 2883 spin_lock(&n->list_lock); 2884 2885 /* See if we can refill from the shared array */ 2886 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { 2887 n->shared->touched = 1; 2888 goto alloc_done; 2889 } 2890 2891 while (batchcount > 0) { 2892 struct list_head *entry; 2893 struct page *page; 2894 /* Get slab alloc is to come from. */ 2895 entry = n->slabs_partial.next; 2896 if (entry == &n->slabs_partial) { 2897 n->free_touched = 1; 2898 entry = n->slabs_free.next; 2899 if (entry == &n->slabs_free) 2900 goto must_grow; 2901 } 2902 2903 page = list_entry(entry, struct page, lru); 2904 check_spinlock_acquired(cachep); 2905 2906 /* 2907 * The slab was either on partial or free list so 2908 * there must be at least one object available for 2909 * allocation. 
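 * page->active counts the objects already handed out, so it must
 * still be below cachep->num here.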
2910 */ 2911 BUG_ON(page->active >= cachep->num); 2912 2913 while (page->active < cachep->num && batchcount--) { 2914 STATS_INC_ALLOCED(cachep); 2915 STATS_INC_ACTIVE(cachep); 2916 STATS_SET_HIGH(cachep); 2917 2918 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, 2919 node)); 2920 } 2921 2922 /* move slabp to correct slabp list: */ 2923 list_del(&page->lru); 2924 if (page->active == cachep->num) 2925 list_add(&page->lru, &n->slabs_full); 2926 else 2927 list_add(&page->lru, &n->slabs_partial); 2928 } 2929 2930 must_grow: 2931 n->free_objects -= ac->avail; 2932 alloc_done: 2933 spin_unlock(&n->list_lock); 2934 2935 if (unlikely(!ac->avail)) { 2936 int x; 2937 force_grow: 2938 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 2939 2940 /* cache_grow can reenable interrupts, then ac could change. */ 2941 ac = cpu_cache_get(cachep); 2942 node = numa_mem_id(); 2943 2944 /* no objects in sight? abort */ 2945 if (!x && (ac->avail == 0 || force_refill)) 2946 return NULL; 2947 2948 if (!ac->avail) /* objects refilled by interrupt? */ 2949 goto retry; 2950 } 2951 ac->touched = 1; 2952 2953 return ac_get_obj(cachep, ac, flags, force_refill); 2954 } 2955 2956 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 2957 gfp_t flags) 2958 { 2959 might_sleep_if(flags & __GFP_WAIT); 2960 #if DEBUG 2961 kmem_flagcheck(cachep, flags); 2962 #endif 2963 } 2964 2965 #if DEBUG 2966 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2967 gfp_t flags, void *objp, unsigned long caller) 2968 { 2969 if (!objp) 2970 return objp; 2971 if (cachep->flags & SLAB_POISON) { 2972 #ifdef CONFIG_DEBUG_PAGEALLOC 2973 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 2974 kernel_map_pages(virt_to_page(objp), 2975 cachep->size / PAGE_SIZE, 1); 2976 else 2977 check_poison_obj(cachep, objp); 2978 #else 2979 check_poison_obj(cachep, objp); 2980 #endif 2981 poison_obj(cachep, objp, POISON_INUSE); 2982 } 2983 if (cachep->flags & SLAB_STORE_USER) 2984 *dbg_userword(cachep, objp) = (void *)caller; 2985 2986 if (cachep->flags & SLAB_RED_ZONE) { 2987 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 2988 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 2989 slab_error(cachep, "double free, or memory outside" 2990 " object was overwritten"); 2991 printk(KERN_ERR 2992 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 2993 objp, *dbg_redzone1(cachep, objp), 2994 *dbg_redzone2(cachep, objp)); 2995 } 2996 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 2997 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2998 } 2999 objp += obj_offset(cachep); 3000 if (cachep->ctor && cachep->flags & SLAB_POISON) 3001 cachep->ctor(objp); 3002 if (ARCH_SLAB_MINALIGN && 3003 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 3004 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3005 objp, (int)ARCH_SLAB_MINALIGN); 3006 } 3007 return objp; 3008 } 3009 #else 3010 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3011 #endif 3012 3013 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) 3014 { 3015 if (cachep == kmem_cache) 3016 return false; 3017 3018 return should_failslab(cachep->object_size, flags, cachep->flags); 3019 } 3020 3021 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3022 { 3023 void *objp; 3024 struct array_cache *ac; 3025 bool force_refill = false; 3026 3027 check_irq_off(); 3028 3029 ac = cpu_cache_get(cachep); 3030 if (likely(ac->avail)) { 3031 ac->touched = 1; 3032 objp = ac_get_obj(cachep, ac, flags, false); 3033 3034 /* 3035 * Allow for the 
possibility all avail objects are not allowed 3036 * by the current flags 3037 */ 3038 if (objp) { 3039 STATS_INC_ALLOCHIT(cachep); 3040 goto out; 3041 } 3042 force_refill = true; 3043 } 3044 3045 STATS_INC_ALLOCMISS(cachep); 3046 objp = cache_alloc_refill(cachep, flags, force_refill); 3047 /* 3048 * the 'ac' may be updated by cache_alloc_refill(), 3049 * and kmemleak_erase() requires its correct value. 3050 */ 3051 ac = cpu_cache_get(cachep); 3052 3053 out: 3054 /* 3055 * To avoid a false negative, if an object that is in one of the 3056 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 3057 * treat the array pointers as a reference to the object. 3058 */ 3059 if (objp) 3060 kmemleak_erase(&ac->entry[ac->avail]); 3061 return objp; 3062 } 3063 3064 #ifdef CONFIG_NUMA 3065 /* 3066 * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set. 3067 * 3068 * If we are in_interrupt, then process context, including cpusets and 3069 * mempolicy, may not apply and should not be used for allocation policy. 3070 */ 3071 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3072 { 3073 int nid_alloc, nid_here; 3074 3075 if (in_interrupt() || (flags & __GFP_THISNODE)) 3076 return NULL; 3077 nid_alloc = nid_here = numa_mem_id(); 3078 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3079 nid_alloc = cpuset_slab_spread_node(); 3080 else if (current->mempolicy) 3081 nid_alloc = mempolicy_slab_node(); 3082 if (nid_alloc != nid_here) 3083 return ____cache_alloc_node(cachep, flags, nid_alloc); 3084 return NULL; 3085 } 3086 3087 /* 3088 * Fallback function if there was no memory available and no objects on a 3089 * certain node and fall back is permitted. First we scan all the 3090 * available node for available objects. If that fails then we 3091 * perform an allocation without specifying a node. This allows the page 3092 * allocator to do its reclaim / fallback magic. We then insert the 3093 * slab into the proper nodelist and then allocate from it. 3094 */ 3095 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3096 { 3097 struct zonelist *zonelist; 3098 gfp_t local_flags; 3099 struct zoneref *z; 3100 struct zone *zone; 3101 enum zone_type high_zoneidx = gfp_zone(flags); 3102 void *obj = NULL; 3103 int nid; 3104 unsigned int cpuset_mems_cookie; 3105 3106 if (flags & __GFP_THISNODE) 3107 return NULL; 3108 3109 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 3110 3111 retry_cpuset: 3112 cpuset_mems_cookie = read_mems_allowed_begin(); 3113 zonelist = node_zonelist(mempolicy_slab_node(), flags); 3114 3115 retry: 3116 /* 3117 * Look through allowed nodes for objects available 3118 * from existing per node queues. 3119 */ 3120 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3121 nid = zone_to_nid(zone); 3122 3123 if (cpuset_zone_allowed_hardwall(zone, flags) && 3124 cache->node[nid] && 3125 cache->node[nid]->free_objects) { 3126 obj = ____cache_alloc_node(cache, 3127 flags | GFP_THISNODE, nid); 3128 if (obj) 3129 break; 3130 } 3131 } 3132 3133 if (!obj) { 3134 /* 3135 * This allocation will be performed within the constraints 3136 * of the current cpuset / memory policy requirements. 3137 * We may trigger various forms of reclaim on the allowed 3138 * set and go into memory reserves if necessary. 
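 * If a page is obtained it is grown into a slab on whichever node it
 * landed on, and the node-local allocation is retried from there.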
3139 */ 3140 struct page *page; 3141 3142 if (local_flags & __GFP_WAIT) 3143 local_irq_enable(); 3144 kmem_flagcheck(cache, flags); 3145 page = kmem_getpages(cache, local_flags, numa_mem_id()); 3146 if (local_flags & __GFP_WAIT) 3147 local_irq_disable(); 3148 if (page) { 3149 /* 3150 * Insert into the appropriate per node queues 3151 */ 3152 nid = page_to_nid(page); 3153 if (cache_grow(cache, flags, nid, page)) { 3154 obj = ____cache_alloc_node(cache, 3155 flags | GFP_THISNODE, nid); 3156 if (!obj) 3157 /* 3158 * Another processor may allocate the 3159 * objects in the slab since we are 3160 * not holding any locks. 3161 */ 3162 goto retry; 3163 } else { 3164 /* cache_grow already freed obj */ 3165 obj = NULL; 3166 } 3167 } 3168 } 3169 3170 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) 3171 goto retry_cpuset; 3172 return obj; 3173 } 3174 3175 /* 3176 * A interface to enable slab creation on nodeid 3177 */ 3178 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3179 int nodeid) 3180 { 3181 struct list_head *entry; 3182 struct page *page; 3183 struct kmem_cache_node *n; 3184 void *obj; 3185 int x; 3186 3187 VM_BUG_ON(nodeid > num_online_nodes()); 3188 n = cachep->node[nodeid]; 3189 BUG_ON(!n); 3190 3191 retry: 3192 check_irq_off(); 3193 spin_lock(&n->list_lock); 3194 entry = n->slabs_partial.next; 3195 if (entry == &n->slabs_partial) { 3196 n->free_touched = 1; 3197 entry = n->slabs_free.next; 3198 if (entry == &n->slabs_free) 3199 goto must_grow; 3200 } 3201 3202 page = list_entry(entry, struct page, lru); 3203 check_spinlock_acquired_node(cachep, nodeid); 3204 3205 STATS_INC_NODEALLOCS(cachep); 3206 STATS_INC_ACTIVE(cachep); 3207 STATS_SET_HIGH(cachep); 3208 3209 BUG_ON(page->active == cachep->num); 3210 3211 obj = slab_get_obj(cachep, page, nodeid); 3212 n->free_objects--; 3213 /* move slabp to correct slabp list: */ 3214 list_del(&page->lru); 3215 3216 if (page->active == cachep->num) 3217 list_add(&page->lru, &n->slabs_full); 3218 else 3219 list_add(&page->lru, &n->slabs_partial); 3220 3221 spin_unlock(&n->list_lock); 3222 goto done; 3223 3224 must_grow: 3225 spin_unlock(&n->list_lock); 3226 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3227 if (x) 3228 goto retry; 3229 3230 return fallback_alloc(cachep, flags); 3231 3232 done: 3233 return obj; 3234 } 3235 3236 static __always_inline void * 3237 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3238 unsigned long caller) 3239 { 3240 unsigned long save_flags; 3241 void *ptr; 3242 int slab_node = numa_mem_id(); 3243 3244 flags &= gfp_allowed_mask; 3245 3246 lockdep_trace_alloc(flags); 3247 3248 if (slab_should_failslab(cachep, flags)) 3249 return NULL; 3250 3251 cachep = memcg_kmem_get_cache(cachep, flags); 3252 3253 cache_alloc_debugcheck_before(cachep, flags); 3254 local_irq_save(save_flags); 3255 3256 if (nodeid == NUMA_NO_NODE) 3257 nodeid = slab_node; 3258 3259 if (unlikely(!cachep->node[nodeid])) { 3260 /* Node not bootstrapped yet */ 3261 ptr = fallback_alloc(cachep, flags); 3262 goto out; 3263 } 3264 3265 if (nodeid == slab_node) { 3266 /* 3267 * Use the locally cached objects if possible. 3268 * However ____cache_alloc does not allow fallback 3269 * to other nodes. It may fail while we still have 3270 * objects on other nodes available. 
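 * If it fails, fall through to ____cache_alloc_node() below, which is
 * allowed to fall back to other nodes.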
3271 */ 3272 ptr = ____cache_alloc(cachep, flags); 3273 if (ptr) 3274 goto out; 3275 } 3276 /* ___cache_alloc_node can fall back to other nodes */ 3277 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3278 out: 3279 local_irq_restore(save_flags); 3280 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3281 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, 3282 flags); 3283 3284 if (likely(ptr)) { 3285 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); 3286 if (unlikely(flags & __GFP_ZERO)) 3287 memset(ptr, 0, cachep->object_size); 3288 } 3289 3290 return ptr; 3291 } 3292 3293 static __always_inline void * 3294 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3295 { 3296 void *objp; 3297 3298 if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) { 3299 objp = alternate_node_alloc(cache, flags); 3300 if (objp) 3301 goto out; 3302 } 3303 objp = ____cache_alloc(cache, flags); 3304 3305 /* 3306 * We may just have run out of memory on the local node. 3307 * ____cache_alloc_node() knows how to locate memory on other nodes 3308 */ 3309 if (!objp) 3310 objp = ____cache_alloc_node(cache, flags, numa_mem_id()); 3311 3312 out: 3313 return objp; 3314 } 3315 #else 3316 3317 static __always_inline void * 3318 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3319 { 3320 return ____cache_alloc(cachep, flags); 3321 } 3322 3323 #endif /* CONFIG_NUMA */ 3324 3325 static __always_inline void * 3326 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) 3327 { 3328 unsigned long save_flags; 3329 void *objp; 3330 3331 flags &= gfp_allowed_mask; 3332 3333 lockdep_trace_alloc(flags); 3334 3335 if (slab_should_failslab(cachep, flags)) 3336 return NULL; 3337 3338 cachep = memcg_kmem_get_cache(cachep, flags); 3339 3340 cache_alloc_debugcheck_before(cachep, flags); 3341 local_irq_save(save_flags); 3342 objp = __do_cache_alloc(cachep, flags); 3343 local_irq_restore(save_flags); 3344 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3345 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, 3346 flags); 3347 prefetchw(objp); 3348 3349 if (likely(objp)) { 3350 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); 3351 if (unlikely(flags & __GFP_ZERO)) 3352 memset(objp, 0, cachep->object_size); 3353 } 3354 3355 return objp; 3356 } 3357 3358 /* 3359 * Caller needs to acquire correct kmem_cache_node's list_lock 3360 */ 3361 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3362 int node) 3363 { 3364 int i; 3365 struct kmem_cache_node *n; 3366 3367 for (i = 0; i < nr_objects; i++) { 3368 void *objp; 3369 struct page *page; 3370 3371 clear_obj_pfmemalloc(&objpp[i]); 3372 objp = objpp[i]; 3373 3374 page = virt_to_head_page(objp); 3375 n = cachep->node[node]; 3376 list_del(&page->lru); 3377 check_spinlock_acquired_node(cachep, node); 3378 slab_put_obj(cachep, page, objp, node); 3379 STATS_DEC_ACTIVE(cachep); 3380 n->free_objects++; 3381 3382 /* fixup slab chains */ 3383 if (page->active == 0) { 3384 if (n->free_objects > n->free_limit) { 3385 n->free_objects -= cachep->num; 3386 /* No need to drop any previously held 3387 * lock here, even if we have a off-slab slab 3388 * descriptor it is guaranteed to come from 3389 * a different cache, refer to comments before 3390 * alloc_slabmgmt. 
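 * Freeing that descriptor therefore cannot recurse into the cache we
 * are currently freeing from.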
3391 */ 3392 slab_destroy(cachep, page); 3393 } else { 3394 list_add(&page->lru, &n->slabs_free); 3395 } 3396 } else { 3397 /* Unconditionally move a slab to the end of the 3398 * partial list on free - maximum time for the 3399 * other objects to be freed, too. 3400 */ 3401 list_add_tail(&page->lru, &n->slabs_partial); 3402 } 3403 } 3404 } 3405 3406 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3407 { 3408 int batchcount; 3409 struct kmem_cache_node *n; 3410 int node = numa_mem_id(); 3411 3412 batchcount = ac->batchcount; 3413 #if DEBUG 3414 BUG_ON(!batchcount || batchcount > ac->avail); 3415 #endif 3416 check_irq_off(); 3417 n = cachep->node[node]; 3418 spin_lock(&n->list_lock); 3419 if (n->shared) { 3420 struct array_cache *shared_array = n->shared; 3421 int max = shared_array->limit - shared_array->avail; 3422 if (max) { 3423 if (batchcount > max) 3424 batchcount = max; 3425 memcpy(&(shared_array->entry[shared_array->avail]), 3426 ac->entry, sizeof(void *) * batchcount); 3427 shared_array->avail += batchcount; 3428 goto free_done; 3429 } 3430 } 3431 3432 free_block(cachep, ac->entry, batchcount, node); 3433 free_done: 3434 #if STATS 3435 { 3436 int i = 0; 3437 struct list_head *p; 3438 3439 p = n->slabs_free.next; 3440 while (p != &(n->slabs_free)) { 3441 struct page *page; 3442 3443 page = list_entry(p, struct page, lru); 3444 BUG_ON(page->active); 3445 3446 i++; 3447 p = p->next; 3448 } 3449 STATS_SET_FREEABLE(cachep, i); 3450 } 3451 #endif 3452 spin_unlock(&n->list_lock); 3453 ac->avail -= batchcount; 3454 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3455 } 3456 3457 /* 3458 * Release an obj back to its cache. If the obj has a constructed state, it must 3459 * be in this state _before_ it is released. Called with disabled ints. 3460 */ 3461 static inline void __cache_free(struct kmem_cache *cachep, void *objp, 3462 unsigned long caller) 3463 { 3464 struct array_cache *ac = cpu_cache_get(cachep); 3465 3466 check_irq_off(); 3467 kmemleak_free_recursive(objp, cachep->flags); 3468 objp = cache_free_debugcheck(cachep, objp, caller); 3469 3470 kmemcheck_slab_free(cachep, objp, cachep->object_size); 3471 3472 /* 3473 * Skip calling cache_free_alien() when the platform is not numa. 3474 * This will avoid cache misses that happen while accessing slabp (which 3475 * is per page memory reference) to get nodeid. Instead use a global 3476 * variable to skip the call, which is mostly likely to be present in 3477 * the cache. 3478 */ 3479 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) 3480 return; 3481 3482 if (likely(ac->avail < ac->limit)) { 3483 STATS_INC_FREEHIT(cachep); 3484 } else { 3485 STATS_INC_FREEMISS(cachep); 3486 cache_flusharray(cachep, ac); 3487 } 3488 3489 ac_put_obj(cachep, ac, objp); 3490 } 3491 3492 /** 3493 * kmem_cache_alloc - Allocate an object 3494 * @cachep: The cache to allocate from. 3495 * @flags: See kmalloc(). 3496 * 3497 * Allocate an object from this cache. The flags are only relevant 3498 * if the cache has no available objects. 
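 *
 * A minimal usage sketch (struct foo and foo_cachep are hypothetical;
 * the cache is assumed to have been set up earlier with
 * kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, f);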
3499 */ 3500 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3501 { 3502 void *ret = slab_alloc(cachep, flags, _RET_IP_); 3503 3504 trace_kmem_cache_alloc(_RET_IP_, ret, 3505 cachep->object_size, cachep->size, flags); 3506 3507 return ret; 3508 } 3509 EXPORT_SYMBOL(kmem_cache_alloc); 3510 3511 #ifdef CONFIG_TRACING 3512 void * 3513 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) 3514 { 3515 void *ret; 3516 3517 ret = slab_alloc(cachep, flags, _RET_IP_); 3518 3519 trace_kmalloc(_RET_IP_, ret, 3520 size, cachep->size, flags); 3521 return ret; 3522 } 3523 EXPORT_SYMBOL(kmem_cache_alloc_trace); 3524 #endif 3525 3526 #ifdef CONFIG_NUMA 3527 /** 3528 * kmem_cache_alloc_node - Allocate an object on the specified node 3529 * @cachep: The cache to allocate from. 3530 * @flags: See kmalloc(). 3531 * @nodeid: node number of the target node. 3532 * 3533 * Identical to kmem_cache_alloc but it will allocate memory on the given 3534 * node, which can improve the performance for cpu bound structures. 3535 * 3536 * Fallback to other node is possible if __GFP_THISNODE is not set. 3537 */ 3538 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3539 { 3540 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3541 3542 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3543 cachep->object_size, cachep->size, 3544 flags, nodeid); 3545 3546 return ret; 3547 } 3548 EXPORT_SYMBOL(kmem_cache_alloc_node); 3549 3550 #ifdef CONFIG_TRACING 3551 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, 3552 gfp_t flags, 3553 int nodeid, 3554 size_t size) 3555 { 3556 void *ret; 3557 3558 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); 3559 3560 trace_kmalloc_node(_RET_IP_, ret, 3561 size, cachep->size, 3562 flags, nodeid); 3563 return ret; 3564 } 3565 EXPORT_SYMBOL(kmem_cache_alloc_node_trace); 3566 #endif 3567 3568 static __always_inline void * 3569 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) 3570 { 3571 struct kmem_cache *cachep; 3572 3573 cachep = kmalloc_slab(size, flags); 3574 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3575 return cachep; 3576 return kmem_cache_alloc_node_trace(cachep, flags, node, size); 3577 } 3578 3579 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) 3580 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3581 { 3582 return __do_kmalloc_node(size, flags, node, _RET_IP_); 3583 } 3584 EXPORT_SYMBOL(__kmalloc_node); 3585 3586 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3587 int node, unsigned long caller) 3588 { 3589 return __do_kmalloc_node(size, flags, node, caller); 3590 } 3591 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3592 #else 3593 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3594 { 3595 return __do_kmalloc_node(size, flags, node, 0); 3596 } 3597 EXPORT_SYMBOL(__kmalloc_node); 3598 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ 3599 #endif /* CONFIG_NUMA */ 3600 3601 /** 3602 * __do_kmalloc - allocate memory 3603 * @size: how many bytes of memory are required. 3604 * @flags: the type of memory to allocate (see kmalloc). 
3605 * @caller: function caller for debug tracking of the caller 3606 */ 3607 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3608 unsigned long caller) 3609 { 3610 struct kmem_cache *cachep; 3611 void *ret; 3612 3613 cachep = kmalloc_slab(size, flags); 3614 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3615 return cachep; 3616 ret = slab_alloc(cachep, flags, caller); 3617 3618 trace_kmalloc(caller, ret, 3619 size, cachep->size, flags); 3620 3621 return ret; 3622 } 3623 3624 3625 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) 3626 void *__kmalloc(size_t size, gfp_t flags) 3627 { 3628 return __do_kmalloc(size, flags, _RET_IP_); 3629 } 3630 EXPORT_SYMBOL(__kmalloc); 3631 3632 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3633 { 3634 return __do_kmalloc(size, flags, caller); 3635 } 3636 EXPORT_SYMBOL(__kmalloc_track_caller); 3637 3638 #else 3639 void *__kmalloc(size_t size, gfp_t flags) 3640 { 3641 return __do_kmalloc(size, flags, 0); 3642 } 3643 EXPORT_SYMBOL(__kmalloc); 3644 #endif 3645 3646 /** 3647 * kmem_cache_free - Deallocate an object 3648 * @cachep: The cache the allocation was from. 3649 * @objp: The previously allocated object. 3650 * 3651 * Free an object which was previously allocated from this 3652 * cache. 3653 */ 3654 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3655 { 3656 unsigned long flags; 3657 cachep = cache_from_obj(cachep, objp); 3658 if (!cachep) 3659 return; 3660 3661 local_irq_save(flags); 3662 debug_check_no_locks_freed(objp, cachep->object_size); 3663 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3664 debug_check_no_obj_freed(objp, cachep->object_size); 3665 __cache_free(cachep, objp, _RET_IP_); 3666 local_irq_restore(flags); 3667 3668 trace_kmem_cache_free(_RET_IP_, objp); 3669 } 3670 EXPORT_SYMBOL(kmem_cache_free); 3671 3672 /** 3673 * kfree - free previously allocated memory 3674 * @objp: pointer returned by kmalloc. 3675 * 3676 * If @objp is NULL, no operation is performed. 3677 * 3678 * Don't free memory not originally allocated by kmalloc() 3679 * or you will run into trouble. 3680 */ 3681 void kfree(const void *objp) 3682 { 3683 struct kmem_cache *c; 3684 unsigned long flags; 3685 3686 trace_kfree(_RET_IP_, objp); 3687 3688 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3689 return; 3690 local_irq_save(flags); 3691 kfree_debugcheck(objp); 3692 c = virt_to_cache(objp); 3693 debug_check_no_locks_freed(objp, c->object_size); 3694 3695 debug_check_no_obj_freed(objp, c->object_size); 3696 __cache_free(c, (void *)objp, _RET_IP_); 3697 local_irq_restore(flags); 3698 } 3699 EXPORT_SYMBOL(kfree); 3700 3701 /* 3702 * This initializes kmem_cache_node or resizes various caches for all nodes. 
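 * New shared and alien arrays are allocated up front for each online
 * node and swapped in under that node's list_lock; if the cache is
 * not active yet, a failure rolls back the nodes set up so far.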
3703 */ 3704 static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) 3705 { 3706 int node; 3707 struct kmem_cache_node *n; 3708 struct array_cache *new_shared; 3709 struct array_cache **new_alien = NULL; 3710 3711 for_each_online_node(node) { 3712 3713 if (use_alien_caches) { 3714 new_alien = alloc_alien_cache(node, cachep->limit, gfp); 3715 if (!new_alien) 3716 goto fail; 3717 } 3718 3719 new_shared = NULL; 3720 if (cachep->shared) { 3721 new_shared = alloc_arraycache(node, 3722 cachep->shared*cachep->batchcount, 3723 0xbaadf00d, gfp); 3724 if (!new_shared) { 3725 free_alien_cache(new_alien); 3726 goto fail; 3727 } 3728 } 3729 3730 n = cachep->node[node]; 3731 if (n) { 3732 struct array_cache *shared = n->shared; 3733 3734 spin_lock_irq(&n->list_lock); 3735 3736 if (shared) 3737 free_block(cachep, shared->entry, 3738 shared->avail, node); 3739 3740 n->shared = new_shared; 3741 if (!n->alien) { 3742 n->alien = new_alien; 3743 new_alien = NULL; 3744 } 3745 n->free_limit = (1 + nr_cpus_node(node)) * 3746 cachep->batchcount + cachep->num; 3747 spin_unlock_irq(&n->list_lock); 3748 kfree(shared); 3749 free_alien_cache(new_alien); 3750 continue; 3751 } 3752 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); 3753 if (!n) { 3754 free_alien_cache(new_alien); 3755 kfree(new_shared); 3756 goto fail; 3757 } 3758 3759 kmem_cache_node_init(n); 3760 n->next_reap = jiffies + REAPTIMEOUT_NODE + 3761 ((unsigned long)cachep) % REAPTIMEOUT_NODE; 3762 n->shared = new_shared; 3763 n->alien = new_alien; 3764 n->free_limit = (1 + nr_cpus_node(node)) * 3765 cachep->batchcount + cachep->num; 3766 cachep->node[node] = n; 3767 } 3768 return 0; 3769 3770 fail: 3771 if (!cachep->list.next) { 3772 /* Cache is not active yet. Roll back what we did */ 3773 node--; 3774 while (node >= 0) { 3775 if (cachep->node[node]) { 3776 n = cachep->node[node]; 3777 3778 kfree(n->shared); 3779 free_alien_cache(n->alien); 3780 kfree(n); 3781 cachep->node[node] = NULL; 3782 } 3783 node--; 3784 } 3785 } 3786 return -ENOMEM; 3787 } 3788 3789 struct ccupdate_struct { 3790 struct kmem_cache *cachep; 3791 struct array_cache *new[0]; 3792 }; 3793 3794 static void do_ccupdate_local(void *info) 3795 { 3796 struct ccupdate_struct *new = info; 3797 struct array_cache *old; 3798 3799 check_irq_off(); 3800 old = cpu_cache_get(new->cachep); 3801 3802 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3803 new->new[smp_processor_id()] = old; 3804 } 3805 3806 /* Always called with the slab_mutex held */ 3807 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, 3808 int batchcount, int shared, gfp_t gfp) 3809 { 3810 struct ccupdate_struct *new; 3811 int i; 3812 3813 new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), 3814 gfp); 3815 if (!new) 3816 return -ENOMEM; 3817 3818 for_each_online_cpu(i) { 3819 new->new[i] = alloc_arraycache(cpu_to_mem(i), limit, 3820 batchcount, gfp); 3821 if (!new->new[i]) { 3822 for (i--; i >= 0; i--) 3823 kfree(new->new[i]); 3824 kfree(new); 3825 return -ENOMEM; 3826 } 3827 } 3828 new->cachep = cachep; 3829 3830 on_each_cpu(do_ccupdate_local, (void *)new, 1); 3831 3832 check_irq_on(); 3833 cachep->batchcount = batchcount; 3834 cachep->limit = limit; 3835 cachep->shared = shared; 3836 3837 for_each_online_cpu(i) { 3838 struct array_cache *ccold = new->new[i]; 3839 if (!ccold) 3840 continue; 3841 spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); 3842 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); 3843 
spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); 3844 kfree(ccold); 3845 } 3846 kfree(new); 3847 return alloc_kmem_cache_node(cachep, gfp); 3848 } 3849 3850 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3851 int batchcount, int shared, gfp_t gfp) 3852 { 3853 int ret; 3854 struct kmem_cache *c = NULL; 3855 int i = 0; 3856 3857 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3858 3859 if (slab_state < FULL) 3860 return ret; 3861 3862 if ((ret < 0) || !is_root_cache(cachep)) 3863 return ret; 3864 3865 VM_BUG_ON(!mutex_is_locked(&slab_mutex)); 3866 for_each_memcg_cache_index(i) { 3867 c = cache_from_memcg_idx(cachep, i); 3868 if (c) 3869 /* return value determined by the parent cache only */ 3870 __do_tune_cpucache(c, limit, batchcount, shared, gfp); 3871 } 3872 3873 return ret; 3874 } 3875 3876 /* Called with slab_mutex held always */ 3877 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) 3878 { 3879 int err; 3880 int limit = 0; 3881 int shared = 0; 3882 int batchcount = 0; 3883 3884 if (!is_root_cache(cachep)) { 3885 struct kmem_cache *root = memcg_root_cache(cachep); 3886 limit = root->limit; 3887 shared = root->shared; 3888 batchcount = root->batchcount; 3889 } 3890 3891 if (limit && shared && batchcount) 3892 goto skip_setup; 3893 /* 3894 * The head array serves three purposes: 3895 * - create a LIFO ordering, i.e. return objects that are cache-warm 3896 * - reduce the number of spinlock operations. 3897 * - reduce the number of linked list operations on the slab and 3898 * bufctl chains: array operations are cheaper. 3899 * The numbers are guessed, we should auto-tune as described by 3900 * Bonwick. 3901 */ 3902 if (cachep->size > 131072) 3903 limit = 1; 3904 else if (cachep->size > PAGE_SIZE) 3905 limit = 8; 3906 else if (cachep->size > 1024) 3907 limit = 24; 3908 else if (cachep->size > 256) 3909 limit = 54; 3910 else 3911 limit = 120; 3912 3913 /* 3914 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3915 * allocation behaviour: Most allocs on one cpu, most free operations 3916 * on another cpu. For these cases, an efficient object passing between 3917 * cpus is necessary. This is provided by a shared array. The array 3918 * replaces Bonwick's magazine layer. 3919 * On uniprocessor, it's functionally equivalent (but less efficient) 3920 * to a larger limit. Thus disabled by default. 3921 */ 3922 shared = 0; 3923 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) 3924 shared = 8; 3925 3926 #if DEBUG 3927 /* 3928 * With debugging enabled, large batchcount lead to excessively long 3929 * periods with disabled local interrupts. Limit the batchcount 3930 */ 3931 if (limit > 32) 3932 limit = 32; 3933 #endif 3934 batchcount = (limit + 1) / 2; 3935 skip_setup: 3936 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); 3937 if (err) 3938 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3939 cachep->name, -err); 3940 return err; 3941 } 3942 3943 /* 3944 * Drain an array if it contains any elements taking the node lock only if 3945 * necessary. Note that the node listlock also protects the array_cache 3946 * if drain_array() is used on the shared array. 
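 * An array that was recently touched is left alone unless draining is
 * forced; otherwise roughly a fifth of its limit (at most half of the
 * available entries) is flushed back to the node lists.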
/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&n->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&n->list_lock);
	}
}
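
/*
 * Numerical sketch (values invented for illustration): with limit = 120
 * and the array untouched since the last pass, a non-forced call drains
 *
 *	tofree = (120 + 4) / 5 = 24
 *
 * objects, i.e. roughly a fifth of the limit per pass; if fewer than 24
 * objects are available, about half of what is present is drained instead.
 */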
/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Set up the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = searchp->node[node];

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, 0, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}
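
/*
 * Rough example with made-up numbers: if a node's free_limit is 308 and
 * the cache packs num = 8 objects per slab, one idle pass may free up to
 *
 *	(308 + 5*8 - 1) / (5*8) = 8 slabs	(~64 objects)
 *
 * and this node-level work runs at most once per REAPTIMEOUT_NODE, so a
 * completely idle node gives its cached free slabs back gradually over
 * several reap cycles rather than all at once.
 */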
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	struct page *page;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_cache_node *n;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru) {
			if (page->active != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_partial, lru) {
			if (page->active == cachep->num && !error)
				error = "slabs_partial accounting error";
			if (!page->active && !error)
				error = "slabs_partial accounting error";
			active_objs += page->active;
			active_slabs++;
		}
		list_for_each_entry(page, &n->slabs_free, lru) {
			if (page->active && !error)
				error = "slabs_free accounting error";
			num_slabs++;
		}
		free_objects += n->free_objects;
		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = num_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}
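
/*
 * Usage sketch (cache name and values are made up): with CONFIG_SLABINFO
 * and a writable /proc/slabinfo, a write such as
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * asks for the named cache to be retuned to limit = 120, batchcount = 60
 * and shared = 8. The name must match an existing cache exactly; values
 * that fail the sanity check above (e.g. batchcount > limit) are silently
 * ignored and the write still reports success.
 */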
#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i, j;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		bool active = true;

		for (j = page->active; j < c->num; j++) {
			/* Skip freed item */
			if (get_free_obj(page, j) == i) {
				active = false;
				break;
			}
		}
		if (!active)
			continue;

		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	x[1] = 0;

	for_each_online_node(node) {
		n = cachep->node[node];
		if (!n)
			continue;

		check_irq_on();
		spin_lock_irq(&n->list_lock);

		list_for_each_entry(page, &n->slabs_full, lru)
			handle_slab(x, cachep, page);
		list_for_each_entry(page, &n->slabs_partial, lru)
			handle_slab(x, cachep, page);
		spin_unlock_irq(&n->list_lock);
	}
	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;
	if (n) {
		ret = seq_open(file, &slabstats_op);
		if (!ret) {
			struct seq_file *m = file->private_data;
			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
			m->private = n;
			n = NULL;
		}
		kfree(n);
	}
	return ret;
}

static const struct file_operations proc_slabstats_operations = {
	.open = slabstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed for the duration of the call.
 */
size_t ksize(const void *objp)
{
	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
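
/*
 * Illustrative sketch only, kept out of the build: it is not part of the
 * allocator and the helper name is hypothetical. It shows the pattern
 * ksize() exists for - discovering the slack kmalloc() rounded the
 * allocation up to and using all of it.
 */
#if 0
static void *example_alloc_buffer(size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	/*
	 * ksize() may report more than 'len'; every byte of the reported
	 * size belongs to this allocation and may be used by the caller.
	 */
	memset(buf, 0, ksize(buf));
	return buf;
}
#endif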