1 /* 2 * linux/mm/slab.c 3 * Written by Mark Hemment, 1996/97. 4 * (markhe@nextd.demon.co.uk) 5 * 6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli 7 * 8 * Major cleanup, different bufctl logic, per-cpu arrays 9 * (c) 2000 Manfred Spraul 10 * 11 * Cleanup, make the head arrays unconditional, preparation for NUMA 12 * (c) 2002 Manfred Spraul 13 * 14 * An implementation of the Slab Allocator as described in outline in; 15 * UNIX Internals: The New Frontiers by Uresh Vahalia 16 * Pub: Prentice Hall ISBN 0-13-101908-2 17 * or with a little more detail in; 18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator 19 * Jeff Bonwick (Sun Microsystems). 20 * Presented at: USENIX Summer 1994 Technical Conference 21 * 22 * The memory is organized in caches, one cache for each object type. 23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct) 24 * Each cache consists out of many slabs (they are small (usually one 25 * page long) and always contiguous), and each slab contains multiple 26 * initialized objects. 27 * 28 * This means, that your constructor is used only for newly allocated 29 * slabs and you must pass objects with the same initializations to 30 * kmem_cache_free. 31 * 32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, 33 * normal). If you need a special memory type, then must create a new 34 * cache for that memory type. 35 * 36 * In order to reduce fragmentation, the slabs are sorted in 3 groups: 37 * full slabs with 0 free objects 38 * partial slabs 39 * empty slabs with no allocated objects 40 * 41 * If partial slabs exist, then new allocations come from these slabs, 42 * otherwise from empty slabs or new slabs are allocated. 43 * 44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache 45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs. 46 * 47 * Each cache has a short per-cpu head array, most allocs 48 * and frees go into that array, and if that array overflows, then 1/2 49 * of the entries in the array are given back into the global cache. 50 * The head array is strictly LIFO and should improve the cache hit rates. 51 * On SMP, it additionally reduces the spinlock operations. 52 * 53 * The c_cpuarray may not be read with enabled local interrupts - 54 * it's changed with a smp_call_function(). 55 * 56 * SMP synchronization: 57 * constructors and destructors are called without any locking. 58 * Several members in struct kmem_cache and struct slab never change, they 59 * are accessed without any locking. 60 * The per-cpu arrays are never accessed from the wrong cpu, no locking, 61 * and local interrupts are disabled so slab code is preempt-safe. 62 * The non-constant members are protected with a per-cache irq spinlock. 63 * 64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch 65 * in 2000 - many ideas in the current implementation are derived from 66 * his patch. 67 * 68 * Further notes from the original documentation: 69 * 70 * 11 April '97. Started multi-threading - markhe 71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'. 72 * The sem is only needed when accessing/extending the cache-chain, which 73 * can never happen inside an interrupt (kmem_cache_create(), 74 * kmem_cache_shrink() and kmem_cache_reap()). 75 * 76 * At present, each engine can be growing a cache. This should be blocked. 77 * 78 * 15 March 2005. NUMA slab allocator. 79 * Shai Fultheim <shai@scalex86.org>. 
80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89 #include <linux/slab.h> 90 #include <linux/mm.h> 91 #include <linux/poison.h> 92 #include <linux/swap.h> 93 #include <linux/cache.h> 94 #include <linux/interrupt.h> 95 #include <linux/init.h> 96 #include <linux/compiler.h> 97 #include <linux/cpuset.h> 98 #include <linux/seq_file.h> 99 #include <linux/notifier.h> 100 #include <linux/kallsyms.h> 101 #include <linux/cpu.h> 102 #include <linux/sysctl.h> 103 #include <linux/module.h> 104 #include <linux/rcupdate.h> 105 #include <linux/string.h> 106 #include <linux/uaccess.h> 107 #include <linux/nodemask.h> 108 #include <linux/mempolicy.h> 109 #include <linux/mutex.h> 110 #include <linux/fault-inject.h> 111 #include <linux/rtmutex.h> 112 #include <linux/reciprocal_div.h> 113 114 #include <asm/cacheflush.h> 115 #include <asm/tlbflush.h> 116 #include <asm/page.h> 117 118 /* 119 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 120 * 0 for faster, smaller code (especially in the critical paths). 121 * 122 * STATS - 1 to collect stats for /proc/slabinfo. 123 * 0 for faster, smaller code (especially in the critical paths). 124 * 125 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 126 */ 127 128 #ifdef CONFIG_DEBUG_SLAB 129 #define DEBUG 1 130 #define STATS 1 131 #define FORCED_DEBUG 1 132 #else 133 #define DEBUG 0 134 #define STATS 0 135 #define FORCED_DEBUG 0 136 #endif 137 138 /* Shouldn't this be in a header file somewhere? */ 139 #define BYTES_PER_WORD sizeof(void *) 140 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) 141 142 #ifndef cache_line_size 143 #define cache_line_size() L1_CACHE_BYTES 144 #endif 145 146 #ifndef ARCH_KMALLOC_MINALIGN 147 /* 148 * Enforce a minimum alignment for the kmalloc caches. 149 * Usually, the kmalloc caches are cache_line_size() aligned, except when 150 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 151 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 152 * alignment larger than the alignment of a 64-bit integer. 153 * ARCH_KMALLOC_MINALIGN allows that. 154 * Note that increasing this value may disable some debug features. 155 */ 156 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 157 #endif 158 159 #ifndef ARCH_SLAB_MINALIGN 160 /* 161 * Enforce a minimum alignment for all caches. 162 * Intended for archs that get misalignment faults even for BYTES_PER_WORD 163 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. 164 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables 165 * some debug features. 166 */ 167 #define ARCH_SLAB_MINALIGN 0 168 #endif 169 170 #ifndef ARCH_KMALLOC_FLAGS 171 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 172 #endif 173 174 /* Legal flag mask for kmem_cache_create(). 
*/ 175 #if DEBUG 176 # define CREATE_MASK (SLAB_RED_ZONE | \ 177 SLAB_POISON | SLAB_HWCACHE_ALIGN | \ 178 SLAB_CACHE_DMA | \ 179 SLAB_STORE_USER | \ 180 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 181 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 182 #else 183 # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ 184 SLAB_CACHE_DMA | \ 185 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 186 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 187 #endif 188 189 /* 190 * kmem_bufctl_t: 191 * 192 * Bufctl's are used for linking objs within a slab 193 * linked offsets. 194 * 195 * This implementation relies on "struct page" for locating the cache & 196 * slab an object belongs to. 197 * This allows the bufctl structure to be small (one int), but limits 198 * the number of objects a slab (not a cache) can contain when off-slab 199 * bufctls are used. The limit is the size of the largest general cache 200 * that does not use off-slab slabs. 201 * For 32bit archs with 4 kB pages, is this 56. 202 * This is not serious, as it is only for large objects, when it is unwise 203 * to have too many per slab. 204 * Note: This limit can be raised by introducing a general cache whose size 205 * is less than 512 (PAGE_SIZE<<3), but greater than 256. 206 */ 207 208 typedef unsigned int kmem_bufctl_t; 209 #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) 210 #define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) 211 #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 212 #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 213 214 /* 215 * struct slab 216 * 217 * Manages the objs in a slab. Placed either at the beginning of mem allocated 218 * for a slab, or allocated from an general cache. 219 * Slabs are chained into three list: fully used, partial, fully free slabs. 220 */ 221 struct slab { 222 struct list_head list; 223 unsigned long colouroff; 224 void *s_mem; /* including colour offset */ 225 unsigned int inuse; /* num of objs active in slab */ 226 kmem_bufctl_t free; 227 unsigned short nodeid; 228 }; 229 230 /* 231 * struct slab_rcu 232 * 233 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 234 * arrange for kmem_freepages to be called via RCU. This is useful if 235 * we need to approach a kernel structure obliquely, from its address 236 * obtained without the usual locking. We can lock the structure to 237 * stabilize it and check it's still at the given address, only if we 238 * can be sure that the memory has not been meanwhile reused for some 239 * other kind of object (which our subsystem's lock might corrupt). 240 * 241 * rcu_read_lock before reading the address, then rcu_read_unlock after 242 * taking the spinlock within the structure expected at that address. 243 * 244 * We assume struct slab_rcu can overlay struct slab when destroying. 245 */ 246 struct slab_rcu { 247 struct rcu_head head; 248 struct kmem_cache *cachep; 249 void *addr; 250 }; 251 252 /* 253 * struct array_cache 254 * 255 * Purpose: 256 * - LIFO ordering, to hand out cache-warm objects from _alloc 257 * - reduce the number of linked list operations 258 * - reduce spinlock operations 259 * 260 * The limit is stored in the per-cpu structure to reduce the data cache 261 * footprint. 262 * 263 */ 264 struct array_cache { 265 unsigned int avail; 266 unsigned int limit; 267 unsigned int batchcount; 268 unsigned int touched; 269 spinlock_t lock; 270 void *entry[]; /* 271 * Must have this definition in here for the proper 272 * alignment of array_cache. Also simplifies accessing 273 * the entries. 
274 */ 275 }; 276 277 /* 278 * bootstrap: The caches do not work without cpuarrays anymore, but the 279 * cpuarrays are allocated from the generic caches... 280 */ 281 #define BOOT_CPUCACHE_ENTRIES 1 282 struct arraycache_init { 283 struct array_cache cache; 284 void *entries[BOOT_CPUCACHE_ENTRIES]; 285 }; 286 287 /* 288 * The slab lists for all objects. 289 */ 290 struct kmem_list3 { 291 struct list_head slabs_partial; /* partial list first, better asm code */ 292 struct list_head slabs_full; 293 struct list_head slabs_free; 294 unsigned long free_objects; 295 unsigned int free_limit; 296 unsigned int colour_next; /* Per-node cache coloring */ 297 spinlock_t list_lock; 298 struct array_cache *shared; /* shared per node */ 299 struct array_cache **alien; /* on other nodes */ 300 unsigned long next_reap; /* updated without locking */ 301 int free_touched; /* updated without locking */ 302 }; 303 304 /* 305 * Need this for bootstrapping a per node allocator. 306 */ 307 #define NUM_INIT_LISTS (3 * MAX_NUMNODES) 308 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 309 #define CACHE_CACHE 0 310 #define SIZE_AC MAX_NUMNODES 311 #define SIZE_L3 (2 * MAX_NUMNODES) 312 313 static int drain_freelist(struct kmem_cache *cache, 314 struct kmem_list3 *l3, int tofree); 315 static void free_block(struct kmem_cache *cachep, void **objpp, int len, 316 int node); 317 static int enable_cpucache(struct kmem_cache *cachep); 318 static void cache_reap(struct work_struct *unused); 319 320 /* 321 * This function must be completely optimized away if a constant is passed to 322 * it. Mostly the same as what is in linux/slab.h except it returns an index. 323 */ 324 static __always_inline int index_of(const size_t size) 325 { 326 extern void __bad_size(void); 327 328 if (__builtin_constant_p(size)) { 329 int i = 0; 330 331 #define CACHE(x) \ 332 if (size <=x) \ 333 return i; \ 334 else \ 335 i++; 336 #include <linux/kmalloc_sizes.h> 337 #undef CACHE 338 __bad_size(); 339 } else 340 __bad_size(); 341 return 0; 342 } 343 344 static int slab_early_init = 1; 345 346 #define INDEX_AC index_of(sizeof(struct arraycache_init)) 347 #define INDEX_L3 index_of(sizeof(struct kmem_list3)) 348 349 static void kmem_list3_init(struct kmem_list3 *parent) 350 { 351 INIT_LIST_HEAD(&parent->slabs_full); 352 INIT_LIST_HEAD(&parent->slabs_partial); 353 INIT_LIST_HEAD(&parent->slabs_free); 354 parent->shared = NULL; 355 parent->alien = NULL; 356 parent->colour_next = 0; 357 spin_lock_init(&parent->list_lock); 358 parent->free_objects = 0; 359 parent->free_touched = 0; 360 } 361 362 #define MAKE_LIST(cachep, listp, slab, nodeid) \ 363 do { \ 364 INIT_LIST_HEAD(listp); \ 365 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 366 } while (0) 367 368 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 369 do { \ 370 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 371 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 372 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 373 } while (0) 374 375 /* 376 * struct kmem_cache 377 * 378 * manages a cache. 379 */ 380 381 struct kmem_cache { 382 /* 1) per-cpu data, touched during every alloc/free */ 383 struct array_cache *array[NR_CPUS]; 384 /* 2) Cache tunables. 
Protected by cache_chain_mutex */ 385 unsigned int batchcount; 386 unsigned int limit; 387 unsigned int shared; 388 389 unsigned int buffer_size; 390 u32 reciprocal_buffer_size; 391 /* 3) touched by every alloc & free from the backend */ 392 393 unsigned int flags; /* constant flags */ 394 unsigned int num; /* # of objs per slab */ 395 396 /* 4) cache_grow/shrink */ 397 /* order of pgs per slab (2^n) */ 398 unsigned int gfporder; 399 400 /* force GFP flags, e.g. GFP_DMA */ 401 gfp_t gfpflags; 402 403 size_t colour; /* cache colouring range */ 404 unsigned int colour_off; /* colour offset */ 405 struct kmem_cache *slabp_cache; 406 unsigned int slab_size; 407 unsigned int dflags; /* dynamic flags */ 408 409 /* constructor func */ 410 void (*ctor)(struct kmem_cache *, void *); 411 412 /* 5) cache creation/removal */ 413 const char *name; 414 struct list_head next; 415 416 /* 6) statistics */ 417 #if STATS 418 unsigned long num_active; 419 unsigned long num_allocations; 420 unsigned long high_mark; 421 unsigned long grown; 422 unsigned long reaped; 423 unsigned long errors; 424 unsigned long max_freeable; 425 unsigned long node_allocs; 426 unsigned long node_frees; 427 unsigned long node_overflow; 428 atomic_t allochit; 429 atomic_t allocmiss; 430 atomic_t freehit; 431 atomic_t freemiss; 432 #endif 433 #if DEBUG 434 /* 435 * If debugging is enabled, then the allocator can add additional 436 * fields and/or padding to every object. buffer_size contains the total 437 * object size including these internal fields, the following two 438 * variables contain the offset to the user object and its size. 439 */ 440 int obj_offset; 441 int obj_size; 442 #endif 443 /* 444 * We put nodelists[] at the end of kmem_cache, because we want to size 445 * this array to nr_node_ids slots instead of MAX_NUMNODES 446 * (see kmem_cache_init()) 447 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache 448 * is statically defined, so we reserve the max number of nodes. 449 */ 450 struct kmem_list3 *nodelists[MAX_NUMNODES]; 451 /* 452 * Do not add fields after nodelists[] 453 */ 454 }; 455 456 #define CFLGS_OFF_SLAB (0x80000000UL) 457 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 458 459 #define BATCHREFILL_LIMIT 16 460 /* 461 * Optimization question: fewer reaps means less probability for unnessary 462 * cpucache drain/refill cycles. 463 * 464 * OTOH the cpuarrays can contain lots of objects, 465 * which could lock up otherwise freeable slabs. 
466 */ 467 #define REAPTIMEOUT_CPUC (2*HZ) 468 #define REAPTIMEOUT_LIST3 (4*HZ) 469 470 #if STATS 471 #define STATS_INC_ACTIVE(x) ((x)->num_active++) 472 #define STATS_DEC_ACTIVE(x) ((x)->num_active--) 473 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 474 #define STATS_INC_GROWN(x) ((x)->grown++) 475 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 476 #define STATS_SET_HIGH(x) \ 477 do { \ 478 if ((x)->num_active > (x)->high_mark) \ 479 (x)->high_mark = (x)->num_active; \ 480 } while (0) 481 #define STATS_INC_ERR(x) ((x)->errors++) 482 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 483 #define STATS_INC_NODEFREES(x) ((x)->node_frees++) 484 #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 485 #define STATS_SET_FREEABLE(x, i) \ 486 do { \ 487 if ((x)->max_freeable < i) \ 488 (x)->max_freeable = i; \ 489 } while (0) 490 #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 491 #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 492 #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 493 #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 494 #else 495 #define STATS_INC_ACTIVE(x) do { } while (0) 496 #define STATS_DEC_ACTIVE(x) do { } while (0) 497 #define STATS_INC_ALLOCED(x) do { } while (0) 498 #define STATS_INC_GROWN(x) do { } while (0) 499 #define STATS_ADD_REAPED(x,y) do { } while (0) 500 #define STATS_SET_HIGH(x) do { } while (0) 501 #define STATS_INC_ERR(x) do { } while (0) 502 #define STATS_INC_NODEALLOCS(x) do { } while (0) 503 #define STATS_INC_NODEFREES(x) do { } while (0) 504 #define STATS_INC_ACOVERFLOW(x) do { } while (0) 505 #define STATS_SET_FREEABLE(x, i) do { } while (0) 506 #define STATS_INC_ALLOCHIT(x) do { } while (0) 507 #define STATS_INC_ALLOCMISS(x) do { } while (0) 508 #define STATS_INC_FREEHIT(x) do { } while (0) 509 #define STATS_INC_FREEMISS(x) do { } while (0) 510 #endif 511 512 #if DEBUG 513 514 /* 515 * memory layout of objects: 516 * 0 : objp 517 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 518 * the end of an object is aligned with the end of the real 519 * allocation. Catches writes behind the end of the allocation. 520 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 521 * redzone word. 522 * cachep->obj_offset: The real object. 
523 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 524 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 525 * [BYTES_PER_WORD long] 526 */ 527 static int obj_offset(struct kmem_cache *cachep) 528 { 529 return cachep->obj_offset; 530 } 531 532 static int obj_size(struct kmem_cache *cachep) 533 { 534 return cachep->obj_size; 535 } 536 537 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 538 { 539 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 540 return (unsigned long long*) (objp + obj_offset(cachep) - 541 sizeof(unsigned long long)); 542 } 543 544 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 545 { 546 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 547 if (cachep->flags & SLAB_STORE_USER) 548 return (unsigned long long *)(objp + cachep->buffer_size - 549 sizeof(unsigned long long) - 550 REDZONE_ALIGN); 551 return (unsigned long long *) (objp + cachep->buffer_size - 552 sizeof(unsigned long long)); 553 } 554 555 static void **dbg_userword(struct kmem_cache *cachep, void *objp) 556 { 557 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 558 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 559 } 560 561 #else 562 563 #define obj_offset(x) 0 564 #define obj_size(cachep) (cachep->buffer_size) 565 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 566 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 567 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 568 569 #endif 570 571 /* 572 * Do not go above this order unless 0 objects fit into the slab. 573 */ 574 #define BREAK_GFP_ORDER_HI 1 575 #define BREAK_GFP_ORDER_LO 0 576 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 577 578 /* 579 * Functions for storing/retrieving the cachep and or slab from the page 580 * allocator. These are used to find the slab an obj belongs to. With kfree(), 581 * these are used to find the cache which an obj belongs to. 
582 */ 583 static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 584 { 585 page->lru.next = (struct list_head *)cache; 586 } 587 588 static inline struct kmem_cache *page_get_cache(struct page *page) 589 { 590 page = compound_head(page); 591 BUG_ON(!PageSlab(page)); 592 return (struct kmem_cache *)page->lru.next; 593 } 594 595 static inline void page_set_slab(struct page *page, struct slab *slab) 596 { 597 page->lru.prev = (struct list_head *)slab; 598 } 599 600 static inline struct slab *page_get_slab(struct page *page) 601 { 602 BUG_ON(!PageSlab(page)); 603 return (struct slab *)page->lru.prev; 604 } 605 606 static inline struct kmem_cache *virt_to_cache(const void *obj) 607 { 608 struct page *page = virt_to_head_page(obj); 609 return page_get_cache(page); 610 } 611 612 static inline struct slab *virt_to_slab(const void *obj) 613 { 614 struct page *page = virt_to_head_page(obj); 615 return page_get_slab(page); 616 } 617 618 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 619 unsigned int idx) 620 { 621 return slab->s_mem + cache->buffer_size * idx; 622 } 623 624 /* 625 * We want to avoid an expensive divide : (offset / cache->buffer_size) 626 * Using the fact that buffer_size is a constant for a particular cache, 627 * we can replace (offset / cache->buffer_size) by 628 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 629 */ 630 static inline unsigned int obj_to_index(const struct kmem_cache *cache, 631 const struct slab *slab, void *obj) 632 { 633 u32 offset = (obj - slab->s_mem); 634 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 635 } 636 637 /* 638 * These are the default caches for kmalloc. Custom caches can have other sizes. 639 */ 640 struct cache_sizes malloc_sizes[] = { 641 #define CACHE(x) { .cs_size = (x) }, 642 #include <linux/kmalloc_sizes.h> 643 CACHE(ULONG_MAX) 644 #undef CACHE 645 }; 646 EXPORT_SYMBOL(malloc_sizes); 647 648 /* Must match cache_sizes above. Out of line to keep cache footprint low. */ 649 struct cache_names { 650 char *name; 651 char *name_dma; 652 }; 653 654 static struct cache_names __initdata cache_names[] = { 655 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 656 #include <linux/kmalloc_sizes.h> 657 {NULL,} 658 #undef CACHE 659 }; 660 661 static struct arraycache_init initarray_cache __initdata = 662 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 663 static struct arraycache_init initarray_generic = 664 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 665 666 /* internal cache of cache description objs */ 667 static struct kmem_cache cache_cache = { 668 .batchcount = 1, 669 .limit = BOOT_CPUCACHE_ENTRIES, 670 .shared = 1, 671 .buffer_size = sizeof(struct kmem_cache), 672 .name = "kmem_cache", 673 }; 674 675 #define BAD_ALIEN_MAGIC 0x01020304ul 676 677 #ifdef CONFIG_LOCKDEP 678 679 /* 680 * Slab sometimes uses the kmalloc slabs to store the slab headers 681 * for other slabs "off slab". 682 * The locking for this is tricky in that it nests within the locks 683 * of all other slabs in a few places; to deal with this special 684 * locking we put on-slab caches into a separate lock-class. 685 * 686 * We set lock class for alien array caches which are up during init. 
687 * The lock annotation will be lost if all cpus of a node goes down and 688 * then comes back up during hotplug 689 */ 690 static struct lock_class_key on_slab_l3_key; 691 static struct lock_class_key on_slab_alc_key; 692 693 static inline void init_lock_keys(void) 694 695 { 696 int q; 697 struct cache_sizes *s = malloc_sizes; 698 699 while (s->cs_size != ULONG_MAX) { 700 for_each_node(q) { 701 struct array_cache **alc; 702 int r; 703 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; 704 if (!l3 || OFF_SLAB(s->cs_cachep)) 705 continue; 706 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 707 alc = l3->alien; 708 /* 709 * FIXME: This check for BAD_ALIEN_MAGIC 710 * should go away when common slab code is taught to 711 * work even without alien caches. 712 * Currently, non NUMA code returns BAD_ALIEN_MAGIC 713 * for alloc_alien_cache, 714 */ 715 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 716 continue; 717 for_each_node(r) { 718 if (alc[r]) 719 lockdep_set_class(&alc[r]->lock, 720 &on_slab_alc_key); 721 } 722 } 723 s++; 724 } 725 } 726 #else 727 static inline void init_lock_keys(void) 728 { 729 } 730 #endif 731 732 /* 733 * Guard access to the cache-chain. 734 */ 735 static DEFINE_MUTEX(cache_chain_mutex); 736 static struct list_head cache_chain; 737 738 /* 739 * chicken and egg problem: delay the per-cpu array allocation 740 * until the general caches are up. 741 */ 742 static enum { 743 NONE, 744 PARTIAL_AC, 745 PARTIAL_L3, 746 FULL 747 } g_cpucache_up; 748 749 /* 750 * used by boot code to determine if it can use slab based allocator 751 */ 752 int slab_is_available(void) 753 { 754 return g_cpucache_up == FULL; 755 } 756 757 static DEFINE_PER_CPU(struct delayed_work, reap_work); 758 759 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 760 { 761 return cachep->array[smp_processor_id()]; 762 } 763 764 static inline struct kmem_cache *__find_general_cachep(size_t size, 765 gfp_t gfpflags) 766 { 767 struct cache_sizes *csizep = malloc_sizes; 768 769 #if DEBUG 770 /* This happens if someone tries to call 771 * kmem_cache_create(), or __kmalloc(), before 772 * the generic caches are initialized. 773 */ 774 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 775 #endif 776 if (!size) 777 return ZERO_SIZE_PTR; 778 779 while (size > csizep->cs_size) 780 csizep++; 781 782 /* 783 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 784 * has cs_{dma,}cachep==NULL. Thus no special case 785 * for large kmalloc calls required. 786 */ 787 #ifdef CONFIG_ZONE_DMA 788 if (unlikely(gfpflags & GFP_DMA)) 789 return csizep->cs_dmacachep; 790 #endif 791 return csizep->cs_cachep; 792 } 793 794 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 795 { 796 return __find_general_cachep(size, gfpflags); 797 } 798 799 static size_t slab_mgmt_size(size_t nr_objs, size_t align) 800 { 801 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 802 } 803 804 /* 805 * Calculate the number of objects and left-over bytes for a given buffer size. 806 */ 807 static void cache_estimate(unsigned long gfporder, size_t buffer_size, 808 size_t align, int flags, size_t *left_over, 809 unsigned int *num) 810 { 811 int nr_objs; 812 size_t mgmt_size; 813 size_t slab_size = PAGE_SIZE << gfporder; 814 815 /* 816 * The slab management structure can be either off the slab or 817 * on it. 
For the latter case, the memory allocated for a 818 * slab is used for: 819 * 820 * - The struct slab 821 * - One kmem_bufctl_t for each object 822 * - Padding to respect alignment of @align 823 * - @buffer_size bytes for each object 824 * 825 * If the slab management structure is off the slab, then the 826 * alignment will already be calculated into the size. Because 827 * the slabs are all pages aligned, the objects will be at the 828 * correct alignment when allocated. 829 */ 830 if (flags & CFLGS_OFF_SLAB) { 831 mgmt_size = 0; 832 nr_objs = slab_size / buffer_size; 833 834 if (nr_objs > SLAB_LIMIT) 835 nr_objs = SLAB_LIMIT; 836 } else { 837 /* 838 * Ignore padding for the initial guess. The padding 839 * is at most @align-1 bytes, and @buffer_size is at 840 * least @align. In the worst case, this result will 841 * be one greater than the number of objects that fit 842 * into the memory allocation when taking the padding 843 * into account. 844 */ 845 nr_objs = (slab_size - sizeof(struct slab)) / 846 (buffer_size + sizeof(kmem_bufctl_t)); 847 848 /* 849 * This calculated number will be either the right 850 * amount, or one greater than what we want. 851 */ 852 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 853 > slab_size) 854 nr_objs--; 855 856 if (nr_objs > SLAB_LIMIT) 857 nr_objs = SLAB_LIMIT; 858 859 mgmt_size = slab_mgmt_size(nr_objs, align); 860 } 861 *num = nr_objs; 862 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 863 } 864 865 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 866 867 static void __slab_error(const char *function, struct kmem_cache *cachep, 868 char *msg) 869 { 870 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 871 function, cachep->name, msg); 872 dump_stack(); 873 } 874 875 /* 876 * By default on NUMA we use alien caches to stage the freeing of 877 * objects allocated from other nodes. This causes massive memory 878 * inefficiencies when using fake NUMA setup to split memory into a 879 * large number of small nodes, so it can be disabled on the command 880 * line 881 */ 882 883 static int use_alien_caches __read_mostly = 1; 884 static int numa_platform __read_mostly = 1; 885 static int __init noaliencache_setup(char *s) 886 { 887 use_alien_caches = 0; 888 return 1; 889 } 890 __setup("noaliencache", noaliencache_setup); 891 892 #ifdef CONFIG_NUMA 893 /* 894 * Special reaping functions for NUMA systems called from cache_reap(). 895 * These take care of doing round robin flushing of alien caches (containing 896 * objects freed on different nodes from which they were allocated) and the 897 * flushing of remote pcps by calling drain_node_pages. 898 */ 899 static DEFINE_PER_CPU(unsigned long, reap_node); 900 901 static void init_reap_node(int cpu) 902 { 903 int node; 904 905 node = next_node(cpu_to_node(cpu), node_online_map); 906 if (node == MAX_NUMNODES) 907 node = first_node(node_online_map); 908 909 per_cpu(reap_node, cpu) = node; 910 } 911 912 static void next_reap_node(void) 913 { 914 int node = __get_cpu_var(reap_node); 915 916 node = next_node(node, node_online_map); 917 if (unlikely(node >= MAX_NUMNODES)) 918 node = first_node(node_online_map); 919 __get_cpu_var(reap_node) = node; 920 } 921 922 #else 923 #define init_reap_node(cpu) do { } while (0) 924 #define next_reap_node(void) do { } while (0) 925 #endif 926 927 /* 928 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 929 * via the workqueue/eventd. 
930 * Add the CPU number into the expiration time to minimize the possibility of 931 * the CPUs getting into lockstep and contending for the global cache chain 932 * lock. 933 */ 934 static void __cpuinit start_cpu_timer(int cpu) 935 { 936 struct delayed_work *reap_work = &per_cpu(reap_work, cpu); 937 938 /* 939 * When this gets called from do_initcalls via cpucache_init(), 940 * init_workqueues() has already run, so keventd will be setup 941 * at that time. 942 */ 943 if (keventd_up() && reap_work->work.func == NULL) { 944 init_reap_node(cpu); 945 INIT_DELAYED_WORK(reap_work, cache_reap); 946 schedule_delayed_work_on(cpu, reap_work, 947 __round_jiffies_relative(HZ, cpu)); 948 } 949 } 950 951 static struct array_cache *alloc_arraycache(int node, int entries, 952 int batchcount) 953 { 954 int memsize = sizeof(void *) * entries + sizeof(struct array_cache); 955 struct array_cache *nc = NULL; 956 957 nc = kmalloc_node(memsize, GFP_KERNEL, node); 958 if (nc) { 959 nc->avail = 0; 960 nc->limit = entries; 961 nc->batchcount = batchcount; 962 nc->touched = 0; 963 spin_lock_init(&nc->lock); 964 } 965 return nc; 966 } 967 968 /* 969 * Transfer objects in one arraycache to another. 970 * Locking must be handled by the caller. 971 * 972 * Return the number of entries transferred. 973 */ 974 static int transfer_objects(struct array_cache *to, 975 struct array_cache *from, unsigned int max) 976 { 977 /* Figure out how many entries to transfer */ 978 int nr = min(min(from->avail, max), to->limit - to->avail); 979 980 if (!nr) 981 return 0; 982 983 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 984 sizeof(void *) *nr); 985 986 from->avail -= nr; 987 to->avail += nr; 988 to->touched = 1; 989 return nr; 990 } 991 992 #ifndef CONFIG_NUMA 993 994 #define drain_alien_cache(cachep, alien) do { } while (0) 995 #define reap_alien(cachep, l3) do { } while (0) 996 997 static inline struct array_cache **alloc_alien_cache(int node, int limit) 998 { 999 return (struct array_cache **)BAD_ALIEN_MAGIC; 1000 } 1001 1002 static inline void free_alien_cache(struct array_cache **ac_ptr) 1003 { 1004 } 1005 1006 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1007 { 1008 return 0; 1009 } 1010 1011 static inline void *alternate_node_alloc(struct kmem_cache *cachep, 1012 gfp_t flags) 1013 { 1014 return NULL; 1015 } 1016 1017 static inline void *____cache_alloc_node(struct kmem_cache *cachep, 1018 gfp_t flags, int nodeid) 1019 { 1020 return NULL; 1021 } 1022 1023 #else /* CONFIG_NUMA */ 1024 1025 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 1026 static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 1027 1028 static struct array_cache **alloc_alien_cache(int node, int limit) 1029 { 1030 struct array_cache **ac_ptr; 1031 int memsize = sizeof(void *) * nr_node_ids; 1032 int i; 1033 1034 if (limit > 1) 1035 limit = 12; 1036 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); 1037 if (ac_ptr) { 1038 for_each_node(i) { 1039 if (i == node || !node_online(i)) { 1040 ac_ptr[i] = NULL; 1041 continue; 1042 } 1043 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); 1044 if (!ac_ptr[i]) { 1045 for (i--; i >= 0; i--) 1046 kfree(ac_ptr[i]); 1047 kfree(ac_ptr); 1048 return NULL; 1049 } 1050 } 1051 } 1052 return ac_ptr; 1053 } 1054 1055 static void free_alien_cache(struct array_cache **ac_ptr) 1056 { 1057 int i; 1058 1059 if (!ac_ptr) 1060 return; 1061 for_each_node(i) 1062 kfree(ac_ptr[i]); 1063 kfree(ac_ptr); 1064 } 1065 1066 static void __drain_alien_cache(struct 
kmem_cache *cachep, 1067 struct array_cache *ac, int node) 1068 { 1069 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1070 1071 if (ac->avail) { 1072 spin_lock(&rl3->list_lock); 1073 /* 1074 * Stuff objects into the remote nodes shared array first. 1075 * That way we could avoid the overhead of putting the objects 1076 * into the free lists and getting them back later. 1077 */ 1078 if (rl3->shared) 1079 transfer_objects(rl3->shared, ac, ac->limit); 1080 1081 free_block(cachep, ac->entry, ac->avail, node); 1082 ac->avail = 0; 1083 spin_unlock(&rl3->list_lock); 1084 } 1085 } 1086 1087 /* 1088 * Called from cache_reap() to regularly drain alien caches round robin. 1089 */ 1090 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1091 { 1092 int node = __get_cpu_var(reap_node); 1093 1094 if (l3->alien) { 1095 struct array_cache *ac = l3->alien[node]; 1096 1097 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1098 __drain_alien_cache(cachep, ac, node); 1099 spin_unlock_irq(&ac->lock); 1100 } 1101 } 1102 } 1103 1104 static void drain_alien_cache(struct kmem_cache *cachep, 1105 struct array_cache **alien) 1106 { 1107 int i = 0; 1108 struct array_cache *ac; 1109 unsigned long flags; 1110 1111 for_each_online_node(i) { 1112 ac = alien[i]; 1113 if (ac) { 1114 spin_lock_irqsave(&ac->lock, flags); 1115 __drain_alien_cache(cachep, ac, i); 1116 spin_unlock_irqrestore(&ac->lock, flags); 1117 } 1118 } 1119 } 1120 1121 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1122 { 1123 struct slab *slabp = virt_to_slab(objp); 1124 int nodeid = slabp->nodeid; 1125 struct kmem_list3 *l3; 1126 struct array_cache *alien = NULL; 1127 int node; 1128 1129 node = numa_node_id(); 1130 1131 /* 1132 * Make sure we are not freeing a object from another node to the array 1133 * cache on this cpu. 1134 */ 1135 if (likely(slabp->nodeid == node)) 1136 return 0; 1137 1138 l3 = cachep->nodelists[node]; 1139 STATS_INC_NODEFREES(cachep); 1140 if (l3->alien && l3->alien[nodeid]) { 1141 alien = l3->alien[nodeid]; 1142 spin_lock(&alien->lock); 1143 if (unlikely(alien->avail == alien->limit)) { 1144 STATS_INC_ACOVERFLOW(cachep); 1145 __drain_alien_cache(cachep, alien, nodeid); 1146 } 1147 alien->entry[alien->avail++] = objp; 1148 spin_unlock(&alien->lock); 1149 } else { 1150 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1151 free_block(cachep, &objp, 1, nodeid); 1152 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1153 } 1154 return 1; 1155 } 1156 #endif 1157 1158 static void __cpuinit cpuup_canceled(long cpu) 1159 { 1160 struct kmem_cache *cachep; 1161 struct kmem_list3 *l3 = NULL; 1162 int node = cpu_to_node(cpu); 1163 1164 list_for_each_entry(cachep, &cache_chain, next) { 1165 struct array_cache *nc; 1166 struct array_cache *shared; 1167 struct array_cache **alien; 1168 cpumask_t mask; 1169 1170 mask = node_to_cpumask(node); 1171 /* cpu is dead; no one can alloc from it. 
*/ 1172 nc = cachep->array[cpu]; 1173 cachep->array[cpu] = NULL; 1174 l3 = cachep->nodelists[node]; 1175 1176 if (!l3) 1177 goto free_array_cache; 1178 1179 spin_lock_irq(&l3->list_lock); 1180 1181 /* Free limit for this kmem_list3 */ 1182 l3->free_limit -= cachep->batchcount; 1183 if (nc) 1184 free_block(cachep, nc->entry, nc->avail, node); 1185 1186 if (!cpus_empty(mask)) { 1187 spin_unlock_irq(&l3->list_lock); 1188 goto free_array_cache; 1189 } 1190 1191 shared = l3->shared; 1192 if (shared) { 1193 free_block(cachep, shared->entry, 1194 shared->avail, node); 1195 l3->shared = NULL; 1196 } 1197 1198 alien = l3->alien; 1199 l3->alien = NULL; 1200 1201 spin_unlock_irq(&l3->list_lock); 1202 1203 kfree(shared); 1204 if (alien) { 1205 drain_alien_cache(cachep, alien); 1206 free_alien_cache(alien); 1207 } 1208 free_array_cache: 1209 kfree(nc); 1210 } 1211 /* 1212 * In the previous loop, all the objects were freed to 1213 * the respective cache's slabs, now we can go ahead and 1214 * shrink each nodelist to its limit. 1215 */ 1216 list_for_each_entry(cachep, &cache_chain, next) { 1217 l3 = cachep->nodelists[node]; 1218 if (!l3) 1219 continue; 1220 drain_freelist(cachep, l3, l3->free_objects); 1221 } 1222 } 1223 1224 static int __cpuinit cpuup_prepare(long cpu) 1225 { 1226 struct kmem_cache *cachep; 1227 struct kmem_list3 *l3 = NULL; 1228 int node = cpu_to_node(cpu); 1229 const int memsize = sizeof(struct kmem_list3); 1230 1231 /* 1232 * We need to do this right in the beginning since 1233 * alloc_arraycache's are going to use this list. 1234 * kmalloc_node allows us to add the slab to the right 1235 * kmem_list3 and not this cpu's kmem_list3 1236 */ 1237 1238 list_for_each_entry(cachep, &cache_chain, next) { 1239 /* 1240 * Set up the size64 kmemlist for cpu before we can 1241 * begin anything. Make sure some other cpu on this 1242 * node has not already allocated this 1243 */ 1244 if (!cachep->nodelists[node]) { 1245 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1246 if (!l3) 1247 goto bad; 1248 kmem_list3_init(l3); 1249 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1250 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1251 1252 /* 1253 * The l3s don't come and go as CPUs come and 1254 * go. cache_chain_mutex is sufficient 1255 * protection here. 
1256 */ 1257 cachep->nodelists[node] = l3; 1258 } 1259 1260 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1261 cachep->nodelists[node]->free_limit = 1262 (1 + nr_cpus_node(node)) * 1263 cachep->batchcount + cachep->num; 1264 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1265 } 1266 1267 /* 1268 * Now we can go ahead with allocating the shared arrays and 1269 * array caches 1270 */ 1271 list_for_each_entry(cachep, &cache_chain, next) { 1272 struct array_cache *nc; 1273 struct array_cache *shared = NULL; 1274 struct array_cache **alien = NULL; 1275 1276 nc = alloc_arraycache(node, cachep->limit, 1277 cachep->batchcount); 1278 if (!nc) 1279 goto bad; 1280 if (cachep->shared) { 1281 shared = alloc_arraycache(node, 1282 cachep->shared * cachep->batchcount, 1283 0xbaadf00d); 1284 if (!shared) { 1285 kfree(nc); 1286 goto bad; 1287 } 1288 } 1289 if (use_alien_caches) { 1290 alien = alloc_alien_cache(node, cachep->limit); 1291 if (!alien) { 1292 kfree(shared); 1293 kfree(nc); 1294 goto bad; 1295 } 1296 } 1297 cachep->array[cpu] = nc; 1298 l3 = cachep->nodelists[node]; 1299 BUG_ON(!l3); 1300 1301 spin_lock_irq(&l3->list_lock); 1302 if (!l3->shared) { 1303 /* 1304 * We are serialised from CPU_DEAD or 1305 * CPU_UP_CANCELLED by the cpucontrol lock 1306 */ 1307 l3->shared = shared; 1308 shared = NULL; 1309 } 1310 #ifdef CONFIG_NUMA 1311 if (!l3->alien) { 1312 l3->alien = alien; 1313 alien = NULL; 1314 } 1315 #endif 1316 spin_unlock_irq(&l3->list_lock); 1317 kfree(shared); 1318 free_alien_cache(alien); 1319 } 1320 return 0; 1321 bad: 1322 cpuup_canceled(cpu); 1323 return -ENOMEM; 1324 } 1325 1326 static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1327 unsigned long action, void *hcpu) 1328 { 1329 long cpu = (long)hcpu; 1330 int err = 0; 1331 1332 switch (action) { 1333 case CPU_UP_PREPARE: 1334 case CPU_UP_PREPARE_FROZEN: 1335 mutex_lock(&cache_chain_mutex); 1336 err = cpuup_prepare(cpu); 1337 mutex_unlock(&cache_chain_mutex); 1338 break; 1339 case CPU_ONLINE: 1340 case CPU_ONLINE_FROZEN: 1341 start_cpu_timer(cpu); 1342 break; 1343 #ifdef CONFIG_HOTPLUG_CPU 1344 case CPU_DOWN_PREPARE: 1345 case CPU_DOWN_PREPARE_FROZEN: 1346 /* 1347 * Shutdown cache reaper. Note that the cache_chain_mutex is 1348 * held so that if cache_reap() is invoked it cannot do 1349 * anything expensive but will only modify reap_work 1350 * and reschedule the timer. 1351 */ 1352 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu)); 1353 /* Now the cache_reaper is guaranteed to be not running. */ 1354 per_cpu(reap_work, cpu).work.func = NULL; 1355 break; 1356 case CPU_DOWN_FAILED: 1357 case CPU_DOWN_FAILED_FROZEN: 1358 start_cpu_timer(cpu); 1359 break; 1360 case CPU_DEAD: 1361 case CPU_DEAD_FROZEN: 1362 /* 1363 * Even if all the cpus of a node are down, we don't free the 1364 * kmem_list3 of any cache. This to avoid a race between 1365 * cpu_down, and a kmalloc allocation from another cpu for 1366 * memory from the node of the cpu going down. The list3 1367 * structure is usually allocated from kmem_cache_create() and 1368 * gets destroyed at kmem_cache_destroy(). 1369 */ 1370 /* fall through */ 1371 #endif 1372 case CPU_UP_CANCELED: 1373 case CPU_UP_CANCELED_FROZEN: 1374 mutex_lock(&cache_chain_mutex); 1375 cpuup_canceled(cpu); 1376 mutex_unlock(&cache_chain_mutex); 1377 break; 1378 } 1379 return err ? 
NOTIFY_BAD : NOTIFY_OK; 1380 } 1381 1382 static struct notifier_block __cpuinitdata cpucache_notifier = { 1383 &cpuup_callback, NULL, 0 1384 }; 1385 1386 /* 1387 * swap the static kmem_list3 with kmalloced memory 1388 */ 1389 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1390 int nodeid) 1391 { 1392 struct kmem_list3 *ptr; 1393 1394 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1395 BUG_ON(!ptr); 1396 1397 local_irq_disable(); 1398 memcpy(ptr, list, sizeof(struct kmem_list3)); 1399 /* 1400 * Do not assume that spinlocks can be initialized via memcpy: 1401 */ 1402 spin_lock_init(&ptr->list_lock); 1403 1404 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1405 cachep->nodelists[nodeid] = ptr; 1406 local_irq_enable(); 1407 } 1408 1409 /* 1410 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1411 * size of kmem_list3. 1412 */ 1413 static void __init set_up_list3s(struct kmem_cache *cachep, int index) 1414 { 1415 int node; 1416 1417 for_each_online_node(node) { 1418 cachep->nodelists[node] = &initkmem_list3[index + node]; 1419 cachep->nodelists[node]->next_reap = jiffies + 1420 REAPTIMEOUT_LIST3 + 1421 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1422 } 1423 } 1424 1425 /* 1426 * Initialisation. Called after the page allocator have been initialised and 1427 * before smp_init(). 1428 */ 1429 void __init kmem_cache_init(void) 1430 { 1431 size_t left_over; 1432 struct cache_sizes *sizes; 1433 struct cache_names *names; 1434 int i; 1435 int order; 1436 int node; 1437 1438 if (num_possible_nodes() == 1) { 1439 use_alien_caches = 0; 1440 numa_platform = 0; 1441 } 1442 1443 for (i = 0; i < NUM_INIT_LISTS; i++) { 1444 kmem_list3_init(&initkmem_list3[i]); 1445 if (i < MAX_NUMNODES) 1446 cache_cache.nodelists[i] = NULL; 1447 } 1448 set_up_list3s(&cache_cache, CACHE_CACHE); 1449 1450 /* 1451 * Fragmentation resistance on low memory - only use bigger 1452 * page orders on machines with more than 32MB of memory. 1453 */ 1454 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1455 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1456 1457 /* Bootstrap is tricky, because several objects are allocated 1458 * from caches that do not exist yet: 1459 * 1) initialize the cache_cache cache: it contains the struct 1460 * kmem_cache structures of all caches, except cache_cache itself: 1461 * cache_cache is statically allocated. 1462 * Initially an __init data area is used for the head array and the 1463 * kmem_list3 structures, it's replaced with a kmalloc allocated 1464 * array at the end of the bootstrap. 1465 * 2) Create the first kmalloc cache. 1466 * The struct kmem_cache for the new cache is allocated normally. 1467 * An __init data area is used for the head array. 1468 * 3) Create the remaining kmalloc caches, with minimally sized 1469 * head arrays. 1470 * 4) Replace the __init data head arrays for cache_cache and the first 1471 * kmalloc cache with kmalloc allocated arrays. 1472 * 5) Replace the __init data for kmem_list3 for cache_cache and 1473 * the other cache's with kmalloc allocated memory. 1474 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
1475 */ 1476 1477 node = numa_node_id(); 1478 1479 /* 1) create the cache_cache */ 1480 INIT_LIST_HEAD(&cache_chain); 1481 list_add(&cache_cache.next, &cache_chain); 1482 cache_cache.colour_off = cache_line_size(); 1483 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1484 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; 1485 1486 /* 1487 * struct kmem_cache size depends on nr_node_ids, which 1488 * can be less than MAX_NUMNODES. 1489 */ 1490 cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) + 1491 nr_node_ids * sizeof(struct kmem_list3 *); 1492 #if DEBUG 1493 cache_cache.obj_size = cache_cache.buffer_size; 1494 #endif 1495 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1496 cache_line_size()); 1497 cache_cache.reciprocal_buffer_size = 1498 reciprocal_value(cache_cache.buffer_size); 1499 1500 for (order = 0; order < MAX_ORDER; order++) { 1501 cache_estimate(order, cache_cache.buffer_size, 1502 cache_line_size(), 0, &left_over, &cache_cache.num); 1503 if (cache_cache.num) 1504 break; 1505 } 1506 BUG_ON(!cache_cache.num); 1507 cache_cache.gfporder = order; 1508 cache_cache.colour = left_over / cache_cache.colour_off; 1509 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1510 sizeof(struct slab), cache_line_size()); 1511 1512 /* 2+3) create the kmalloc caches */ 1513 sizes = malloc_sizes; 1514 names = cache_names; 1515 1516 /* 1517 * Initialize the caches that provide memory for the array cache and the 1518 * kmem_list3 structures first. Without this, further allocations will 1519 * bug. 1520 */ 1521 1522 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1523 sizes[INDEX_AC].cs_size, 1524 ARCH_KMALLOC_MINALIGN, 1525 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1526 NULL); 1527 1528 if (INDEX_AC != INDEX_L3) { 1529 sizes[INDEX_L3].cs_cachep = 1530 kmem_cache_create(names[INDEX_L3].name, 1531 sizes[INDEX_L3].cs_size, 1532 ARCH_KMALLOC_MINALIGN, 1533 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1534 NULL); 1535 } 1536 1537 slab_early_init = 0; 1538 1539 while (sizes->cs_size != ULONG_MAX) { 1540 /* 1541 * For performance, all the general caches are L1 aligned. 1542 * This should be particularly beneficial on SMP boxes, as it 1543 * eliminates "false sharing". 1544 * Note for systems short on memory removing the alignment will 1545 * allow tighter packing of the smaller caches. 
1546 */ 1547 if (!sizes->cs_cachep) { 1548 sizes->cs_cachep = kmem_cache_create(names->name, 1549 sizes->cs_size, 1550 ARCH_KMALLOC_MINALIGN, 1551 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1552 NULL); 1553 } 1554 #ifdef CONFIG_ZONE_DMA 1555 sizes->cs_dmacachep = kmem_cache_create( 1556 names->name_dma, 1557 sizes->cs_size, 1558 ARCH_KMALLOC_MINALIGN, 1559 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1560 SLAB_PANIC, 1561 NULL); 1562 #endif 1563 sizes++; 1564 names++; 1565 } 1566 /* 4) Replace the bootstrap head arrays */ 1567 { 1568 struct array_cache *ptr; 1569 1570 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1571 1572 local_irq_disable(); 1573 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1574 memcpy(ptr, cpu_cache_get(&cache_cache), 1575 sizeof(struct arraycache_init)); 1576 /* 1577 * Do not assume that spinlocks can be initialized via memcpy: 1578 */ 1579 spin_lock_init(&ptr->lock); 1580 1581 cache_cache.array[smp_processor_id()] = ptr; 1582 local_irq_enable(); 1583 1584 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1585 1586 local_irq_disable(); 1587 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1588 != &initarray_generic.cache); 1589 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1590 sizeof(struct arraycache_init)); 1591 /* 1592 * Do not assume that spinlocks can be initialized via memcpy: 1593 */ 1594 spin_lock_init(&ptr->lock); 1595 1596 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1597 ptr; 1598 local_irq_enable(); 1599 } 1600 /* 5) Replace the bootstrap kmem_list3's */ 1601 { 1602 int nid; 1603 1604 for_each_online_node(nid) { 1605 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid); 1606 1607 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1608 &initkmem_list3[SIZE_AC + nid], nid); 1609 1610 if (INDEX_AC != INDEX_L3) { 1611 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1612 &initkmem_list3[SIZE_L3 + nid], nid); 1613 } 1614 } 1615 } 1616 1617 /* 6) resize the head arrays to their final sizes */ 1618 { 1619 struct kmem_cache *cachep; 1620 mutex_lock(&cache_chain_mutex); 1621 list_for_each_entry(cachep, &cache_chain, next) 1622 if (enable_cpucache(cachep)) 1623 BUG(); 1624 mutex_unlock(&cache_chain_mutex); 1625 } 1626 1627 /* Annotate slab for lockdep -- annotate the malloc caches */ 1628 init_lock_keys(); 1629 1630 1631 /* Done! */ 1632 g_cpucache_up = FULL; 1633 1634 /* 1635 * Register a cpu startup notifier callback that initializes 1636 * cpu_cache_get for all new cpus 1637 */ 1638 register_cpu_notifier(&cpucache_notifier); 1639 1640 /* 1641 * The reap timers are started later, with a module init call: That part 1642 * of the kernel is not yet operational. 1643 */ 1644 } 1645 1646 static int __init cpucache_init(void) 1647 { 1648 int cpu; 1649 1650 /* 1651 * Register the timers that return unneeded pages to the page allocator 1652 */ 1653 for_each_online_cpu(cpu) 1654 start_cpu_timer(cpu); 1655 return 0; 1656 } 1657 __initcall(cpucache_init); 1658 1659 /* 1660 * Interface to system's page allocator. No need to hold the cache-lock. 1661 * 1662 * If we requested dmaable memory, we will get it. Even if we 1663 * did not request dmaable memory, we might get it, but that 1664 * would be relatively rare and ignorable. 
1665 */ 1666 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1667 { 1668 struct page *page; 1669 int nr_pages; 1670 int i; 1671 1672 #ifndef CONFIG_MMU 1673 /* 1674 * Nommu uses slab's for process anonymous memory allocations, and thus 1675 * requires __GFP_COMP to properly refcount higher order allocations 1676 */ 1677 flags |= __GFP_COMP; 1678 #endif 1679 1680 flags |= cachep->gfpflags; 1681 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1682 flags |= __GFP_RECLAIMABLE; 1683 1684 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1685 if (!page) 1686 return NULL; 1687 1688 nr_pages = (1 << cachep->gfporder); 1689 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1690 add_zone_page_state(page_zone(page), 1691 NR_SLAB_RECLAIMABLE, nr_pages); 1692 else 1693 add_zone_page_state(page_zone(page), 1694 NR_SLAB_UNRECLAIMABLE, nr_pages); 1695 for (i = 0; i < nr_pages; i++) 1696 __SetPageSlab(page + i); 1697 return page_address(page); 1698 } 1699 1700 /* 1701 * Interface to system's page release. 1702 */ 1703 static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1704 { 1705 unsigned long i = (1 << cachep->gfporder); 1706 struct page *page = virt_to_page(addr); 1707 const unsigned long nr_freed = i; 1708 1709 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1710 sub_zone_page_state(page_zone(page), 1711 NR_SLAB_RECLAIMABLE, nr_freed); 1712 else 1713 sub_zone_page_state(page_zone(page), 1714 NR_SLAB_UNRECLAIMABLE, nr_freed); 1715 while (i--) { 1716 BUG_ON(!PageSlab(page)); 1717 __ClearPageSlab(page); 1718 page++; 1719 } 1720 if (current->reclaim_state) 1721 current->reclaim_state->reclaimed_slab += nr_freed; 1722 free_pages((unsigned long)addr, cachep->gfporder); 1723 } 1724 1725 static void kmem_rcu_free(struct rcu_head *head) 1726 { 1727 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1728 struct kmem_cache *cachep = slab_rcu->cachep; 1729 1730 kmem_freepages(cachep, slab_rcu->addr); 1731 if (OFF_SLAB(cachep)) 1732 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1733 } 1734 1735 #if DEBUG 1736 1737 #ifdef CONFIG_DEBUG_PAGEALLOC 1738 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1739 unsigned long caller) 1740 { 1741 int size = obj_size(cachep); 1742 1743 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1744 1745 if (size < 5 * sizeof(unsigned long)) 1746 return; 1747 1748 *addr++ = 0x12345678; 1749 *addr++ = caller; 1750 *addr++ = smp_processor_id(); 1751 size -= 3 * sizeof(unsigned long); 1752 { 1753 unsigned long *sptr = &caller; 1754 unsigned long svalue; 1755 1756 while (!kstack_end(sptr)) { 1757 svalue = *sptr++; 1758 if (kernel_text_address(svalue)) { 1759 *addr++ = svalue; 1760 size -= sizeof(unsigned long); 1761 if (size <= sizeof(unsigned long)) 1762 break; 1763 } 1764 } 1765 1766 } 1767 *addr++ = 0x87654321; 1768 } 1769 #endif 1770 1771 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1772 { 1773 int size = obj_size(cachep); 1774 addr = &((char *)addr)[obj_offset(cachep)]; 1775 1776 memset(addr, val, size); 1777 *(unsigned char *)(addr + size - 1) = POISON_END; 1778 } 1779 1780 static void dump_line(char *data, int offset, int limit) 1781 { 1782 int i; 1783 unsigned char error = 0; 1784 int bad_count = 0; 1785 1786 printk(KERN_ERR "%03x:", offset); 1787 for (i = 0; i < limit; i++) { 1788 if (data[offset + i] != POISON_FREE) { 1789 error = data[offset + i]; 1790 bad_count++; 1791 } 1792 printk(" %02x", (unsigned char)data[offset + i]); 1793 } 1794 printk("\n"); 1795 1796 if 
(bad_count == 1) { 1797 error ^= POISON_FREE; 1798 if (!(error & (error - 1))) { 1799 printk(KERN_ERR "Single bit error detected. Probably " 1800 "bad RAM.\n"); 1801 #ifdef CONFIG_X86 1802 printk(KERN_ERR "Run memtest86+ or a similar memory " 1803 "test tool.\n"); 1804 #else 1805 printk(KERN_ERR "Run a memory test tool.\n"); 1806 #endif 1807 } 1808 } 1809 } 1810 #endif 1811 1812 #if DEBUG 1813 1814 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1815 { 1816 int i, size; 1817 char *realobj; 1818 1819 if (cachep->flags & SLAB_RED_ZONE) { 1820 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", 1821 *dbg_redzone1(cachep, objp), 1822 *dbg_redzone2(cachep, objp)); 1823 } 1824 1825 if (cachep->flags & SLAB_STORE_USER) { 1826 printk(KERN_ERR "Last user: [<%p>]", 1827 *dbg_userword(cachep, objp)); 1828 print_symbol("(%s)", 1829 (unsigned long)*dbg_userword(cachep, objp)); 1830 printk("\n"); 1831 } 1832 realobj = (char *)objp + obj_offset(cachep); 1833 size = obj_size(cachep); 1834 for (i = 0; i < size && lines; i += 16, lines--) { 1835 int limit; 1836 limit = 16; 1837 if (i + limit > size) 1838 limit = size - i; 1839 dump_line(realobj, i, limit); 1840 } 1841 } 1842 1843 static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1844 { 1845 char *realobj; 1846 int size, i; 1847 int lines = 0; 1848 1849 realobj = (char *)objp + obj_offset(cachep); 1850 size = obj_size(cachep); 1851 1852 for (i = 0; i < size; i++) { 1853 char exp = POISON_FREE; 1854 if (i == size - 1) 1855 exp = POISON_END; 1856 if (realobj[i] != exp) { 1857 int limit; 1858 /* Mismatch ! */ 1859 /* Print header */ 1860 if (lines == 0) { 1861 printk(KERN_ERR 1862 "Slab corruption: %s start=%p, len=%d\n", 1863 cachep->name, realobj, size); 1864 print_objinfo(cachep, objp, 0); 1865 } 1866 /* Hexdump the affected line */ 1867 i = (i / 16) * 16; 1868 limit = 16; 1869 if (i + limit > size) 1870 limit = size - i; 1871 dump_line(realobj, i, limit); 1872 i += 16; 1873 lines++; 1874 /* Limit to 5 lines */ 1875 if (lines > 5) 1876 break; 1877 } 1878 } 1879 if (lines != 0) { 1880 /* Print some data about the neighboring objects, if they 1881 * exist: 1882 */ 1883 struct slab *slabp = virt_to_slab(objp); 1884 unsigned int objnr; 1885 1886 objnr = obj_to_index(cachep, slabp, objp); 1887 if (objnr) { 1888 objp = index_to_obj(cachep, slabp, objnr - 1); 1889 realobj = (char *)objp + obj_offset(cachep); 1890 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1891 realobj, size); 1892 print_objinfo(cachep, objp, 2); 1893 } 1894 if (objnr + 1 < cachep->num) { 1895 objp = index_to_obj(cachep, slabp, objnr + 1); 1896 realobj = (char *)objp + obj_offset(cachep); 1897 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1898 realobj, size); 1899 print_objinfo(cachep, objp, 2); 1900 } 1901 } 1902 } 1903 #endif 1904 1905 #if DEBUG 1906 /** 1907 * slab_destroy_objs - destroy a slab and its objects 1908 * @cachep: cache pointer being destroyed 1909 * @slabp: slab pointer being destroyed 1910 * 1911 * Call the registered destructor for each object in a slab that is being 1912 * destroyed. 
1913 */ 1914 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1915 { 1916 int i; 1917 for (i = 0; i < cachep->num; i++) { 1918 void *objp = index_to_obj(cachep, slabp, i); 1919 1920 if (cachep->flags & SLAB_POISON) { 1921 #ifdef CONFIG_DEBUG_PAGEALLOC 1922 if (cachep->buffer_size % PAGE_SIZE == 0 && 1923 OFF_SLAB(cachep)) 1924 kernel_map_pages(virt_to_page(objp), 1925 cachep->buffer_size / PAGE_SIZE, 1); 1926 else 1927 check_poison_obj(cachep, objp); 1928 #else 1929 check_poison_obj(cachep, objp); 1930 #endif 1931 } 1932 if (cachep->flags & SLAB_RED_ZONE) { 1933 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1934 slab_error(cachep, "start of a freed object " 1935 "was overwritten"); 1936 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1937 slab_error(cachep, "end of a freed object " 1938 "was overwritten"); 1939 } 1940 } 1941 } 1942 #else 1943 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1944 { 1945 } 1946 #endif 1947 1948 /** 1949 * slab_destroy - destroy and release all objects in a slab 1950 * @cachep: cache pointer being destroyed 1951 * @slabp: slab pointer being destroyed 1952 * 1953 * Destroy all the objs in a slab, and release the mem back to the system. 1954 * Before calling the slab must have been unlinked from the cache. The 1955 * cache-lock is not held/needed. 1956 */ 1957 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1958 { 1959 void *addr = slabp->s_mem - slabp->colouroff; 1960 1961 slab_destroy_objs(cachep, slabp); 1962 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1963 struct slab_rcu *slab_rcu; 1964 1965 slab_rcu = (struct slab_rcu *)slabp; 1966 slab_rcu->cachep = cachep; 1967 slab_rcu->addr = addr; 1968 call_rcu(&slab_rcu->head, kmem_rcu_free); 1969 } else { 1970 kmem_freepages(cachep, addr); 1971 if (OFF_SLAB(cachep)) 1972 kmem_cache_free(cachep->slabp_cache, slabp); 1973 } 1974 } 1975 1976 static void __kmem_cache_destroy(struct kmem_cache *cachep) 1977 { 1978 int i; 1979 struct kmem_list3 *l3; 1980 1981 for_each_online_cpu(i) 1982 kfree(cachep->array[i]); 1983 1984 /* NUMA: free the list3 structures */ 1985 for_each_online_node(i) { 1986 l3 = cachep->nodelists[i]; 1987 if (l3) { 1988 kfree(l3->shared); 1989 free_alien_cache(l3->alien); 1990 kfree(l3); 1991 } 1992 } 1993 kmem_cache_free(&cache_cache, cachep); 1994 } 1995 1996 1997 /** 1998 * calculate_slab_order - calculate size (page order) of slabs 1999 * @cachep: pointer to the cache that is being created 2000 * @size: size of objects to be created in this cache. 2001 * @align: required alignment for the objects. 2002 * @flags: slab allocation flags 2003 * 2004 * Also calculates the number of objects per slab. 2005 * 2006 * This could be made much more intelligent. For now, try to avoid using 2007 * high order pages for slabs. When the gfp() functions are more friendly 2008 * towards high-order requests, this should be changed. 2009 */ 2010 static size_t calculate_slab_order(struct kmem_cache *cachep, 2011 size_t size, size_t align, unsigned long flags) 2012 { 2013 unsigned long offslab_limit; 2014 size_t left_over = 0; 2015 int gfporder; 2016 2017 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 2018 unsigned int num; 2019 size_t remainder; 2020 2021 cache_estimate(gfporder, size, align, flags, &remainder, &num); 2022 if (!num) 2023 continue; 2024 2025 if (flags & CFLGS_OFF_SLAB) { 2026 /* 2027 * Max number of objs-per-slab for caches which 2028 * use off-slab slabs. 
Needed to avoid a possible 2029 * looping condition in cache_grow(). 2030 */ 2031 offslab_limit = size - sizeof(struct slab); 2032 offslab_limit /= sizeof(kmem_bufctl_t); 2033 2034 if (num > offslab_limit) 2035 break; 2036 } 2037 2038 /* Found something acceptable - save it away */ 2039 cachep->num = num; 2040 cachep->gfporder = gfporder; 2041 left_over = remainder; 2042 2043 /* 2044 * A VFS-reclaimable slab tends to have most allocations 2045 * as GFP_NOFS and we really don't want to have to be allocating 2046 * higher-order pages when we are unable to shrink dcache. 2047 */ 2048 if (flags & SLAB_RECLAIM_ACCOUNT) 2049 break; 2050 2051 /* 2052 * Large number of objects is good, but very large slabs are 2053 * currently bad for the gfp()s. 2054 */ 2055 if (gfporder >= slab_break_gfp_order) 2056 break; 2057 2058 /* 2059 * Acceptable internal fragmentation? 2060 */ 2061 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2062 break; 2063 } 2064 return left_over; 2065 } 2066 2067 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) 2068 { 2069 if (g_cpucache_up == FULL) 2070 return enable_cpucache(cachep); 2071 2072 if (g_cpucache_up == NONE) { 2073 /* 2074 * Note: the first kmem_cache_create must create the cache 2075 * that's used by kmalloc(24), otherwise the creation of 2076 * further caches will BUG(). 2077 */ 2078 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2079 2080 /* 2081 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2082 * the first cache, then we need to set up all its list3s, 2083 * otherwise the creation of further caches will BUG(). 2084 */ 2085 set_up_list3s(cachep, SIZE_AC); 2086 if (INDEX_AC == INDEX_L3) 2087 g_cpucache_up = PARTIAL_L3; 2088 else 2089 g_cpucache_up = PARTIAL_AC; 2090 } else { 2091 cachep->array[smp_processor_id()] = 2092 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2093 2094 if (g_cpucache_up == PARTIAL_AC) { 2095 set_up_list3s(cachep, SIZE_L3); 2096 g_cpucache_up = PARTIAL_L3; 2097 } else { 2098 int node; 2099 for_each_online_node(node) { 2100 cachep->nodelists[node] = 2101 kmalloc_node(sizeof(struct kmem_list3), 2102 GFP_KERNEL, node); 2103 BUG_ON(!cachep->nodelists[node]); 2104 kmem_list3_init(cachep->nodelists[node]); 2105 } 2106 } 2107 } 2108 cachep->nodelists[numa_node_id()]->next_reap = 2109 jiffies + REAPTIMEOUT_LIST3 + 2110 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2111 2112 cpu_cache_get(cachep)->avail = 0; 2113 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2114 cpu_cache_get(cachep)->batchcount = 1; 2115 cpu_cache_get(cachep)->touched = 0; 2116 cachep->batchcount = 1; 2117 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2118 return 0; 2119 } 2120 2121 /** 2122 * kmem_cache_create - Create a cache. 2123 * @name: A string which is used in /proc/slabinfo to identify this cache. 2124 * @size: The size of objects to be created in this cache. 2125 * @align: The required alignment for the objects. 2126 * @flags: SLAB flags 2127 * @ctor: A constructor for the objects. 2128 * 2129 * Returns a ptr to the cache on success, NULL on failure. 2130 * Cannot be called within a int, but can be interrupted. 2131 * The @ctor is run when new pages are allocated by the cache. 2132 * 2133 * @name must be valid until the cache is destroyed. This implies that 2134 * the module calling this has to destroy the cache before getting unloaded. 2135 * 2136 * The flags are 2137 * 2138 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2139 * to catch references to uninitialised memory. 
2140 * 2141 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2142 * for buffer overruns. 2143 * 2144 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2145 * cacheline. This can be beneficial if you're counting cycles as closely 2146 * as davem. 2147 */ 2148 struct kmem_cache * 2149 kmem_cache_create (const char *name, size_t size, size_t align, 2150 unsigned long flags, 2151 void (*ctor)(struct kmem_cache *, void *)) 2152 { 2153 size_t left_over, slab_size, ralign; 2154 struct kmem_cache *cachep = NULL, *pc; 2155 2156 /* 2157 * Sanity checks... these are all serious usage bugs. 2158 */ 2159 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2160 size > KMALLOC_MAX_SIZE) { 2161 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2162 name); 2163 BUG(); 2164 } 2165 2166 /* 2167 * We use cache_chain_mutex to ensure a consistent view of 2168 * cpu_online_map as well. Please see cpuup_callback 2169 */ 2170 get_online_cpus(); 2171 mutex_lock(&cache_chain_mutex); 2172 2173 list_for_each_entry(pc, &cache_chain, next) { 2174 char tmp; 2175 int res; 2176 2177 /* 2178 * This happens when the module gets unloaded and doesn't 2179 * destroy its slab cache and no-one else reuses the vmalloc 2180 * area of the module. Print a warning. 2181 */ 2182 res = probe_kernel_address(pc->name, tmp); 2183 if (res) { 2184 printk(KERN_ERR 2185 "SLAB: cache with size %d has lost its name\n", 2186 pc->buffer_size); 2187 continue; 2188 } 2189 2190 if (!strcmp(pc->name, name)) { 2191 printk(KERN_ERR 2192 "kmem_cache_create: duplicate cache %s\n", name); 2193 dump_stack(); 2194 goto oops; 2195 } 2196 } 2197 2198 #if DEBUG 2199 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2200 #if FORCED_DEBUG 2201 /* 2202 * Enable redzoning and last user accounting, except for caches with 2203 * large objects, if the increased size would increase the object size 2204 * above the next power of two: caches with object sizes just above a 2205 * power of two have a significant amount of internal fragmentation. 2206 */ 2207 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2208 2 * sizeof(unsigned long long))) 2209 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2210 if (!(flags & SLAB_DESTROY_BY_RCU)) 2211 flags |= SLAB_POISON; 2212 #endif 2213 if (flags & SLAB_DESTROY_BY_RCU) 2214 BUG_ON(flags & SLAB_POISON); 2215 #endif 2216 /* 2217 * Always checks flags, a caller might be expecting debug support which 2218 * isn't available. 2219 */ 2220 BUG_ON(flags & ~CREATE_MASK); 2221 2222 /* 2223 * Check that size is in terms of words. This is needed to avoid 2224 * unaligned accesses for some archs when redzoning is used, and makes 2225 * sure any on-slab bufctl's are also correctly aligned. 2226 */ 2227 if (size & (BYTES_PER_WORD - 1)) { 2228 size += (BYTES_PER_WORD - 1); 2229 size &= ~(BYTES_PER_WORD - 1); 2230 } 2231 2232 /* calculate the final buffer alignment: */ 2233 2234 /* 1) arch recommendation: can be overridden for debug */ 2235 if (flags & SLAB_HWCACHE_ALIGN) { 2236 /* 2237 * Default alignment: as specified by the arch code. Except if 2238 * an object is really small, then squeeze multiple objects into 2239 * one cacheline. 2240 */ 2241 ralign = cache_line_size(); 2242 while (size <= ralign / 2) 2243 ralign /= 2; 2244 } else { 2245 ralign = BYTES_PER_WORD; 2246 } 2247 2248 /* 2249 * Redzoning and user store require word alignment or possibly larger. 
2250 * Note this will be overridden by architecture or caller mandated 2251 * alignment if either is greater than BYTES_PER_WORD. 2252 */ 2253 if (flags & SLAB_STORE_USER) 2254 ralign = BYTES_PER_WORD; 2255 2256 if (flags & SLAB_RED_ZONE) { 2257 ralign = REDZONE_ALIGN; 2258 /* If redzoning, ensure that the second redzone is suitably 2259 * aligned, by adjusting the object size accordingly. */ 2260 size += REDZONE_ALIGN - 1; 2261 size &= ~(REDZONE_ALIGN - 1); 2262 } 2263 2264 /* 2) arch mandated alignment */ 2265 if (ralign < ARCH_SLAB_MINALIGN) { 2266 ralign = ARCH_SLAB_MINALIGN; 2267 } 2268 /* 3) caller mandated alignment */ 2269 if (ralign < align) { 2270 ralign = align; 2271 } 2272 /* disable debug if necessary */ 2273 if (ralign > __alignof__(unsigned long long)) 2274 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2275 /* 2276 * 4) Store it. 2277 */ 2278 align = ralign; 2279 2280 /* Get cache's description obj. */ 2281 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2282 if (!cachep) 2283 goto oops; 2284 2285 #if DEBUG 2286 cachep->obj_size = size; 2287 2288 /* 2289 * Both debugging options require word-alignment which is calculated 2290 * into align above. 2291 */ 2292 if (flags & SLAB_RED_ZONE) { 2293 /* add space for red zone words */ 2294 cachep->obj_offset += sizeof(unsigned long long); 2295 size += 2 * sizeof(unsigned long long); 2296 } 2297 if (flags & SLAB_STORE_USER) { 2298 /* user store requires one word storage behind the end of 2299 * the real object. But if the second red zone needs to be 2300 * aligned to 64 bits, we must allow that much space. 2301 */ 2302 if (flags & SLAB_RED_ZONE) 2303 size += REDZONE_ALIGN; 2304 else 2305 size += BYTES_PER_WORD; 2306 } 2307 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2308 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2309 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2310 cachep->obj_offset += PAGE_SIZE - size; 2311 size = PAGE_SIZE; 2312 } 2313 #endif 2314 #endif 2315 2316 /* 2317 * Determine if the slab management is 'on' or 'off' slab. 2318 * (bootstrapping cannot cope with offslab caches so don't do 2319 * it too early on.) 2320 */ 2321 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2322 /* 2323 * Size is large, assume best to place the slab management obj 2324 * off-slab (should allow better packing of objs). 2325 */ 2326 flags |= CFLGS_OFF_SLAB; 2327 2328 size = ALIGN(size, align); 2329 2330 left_over = calculate_slab_order(cachep, size, align, flags); 2331 2332 if (!cachep->num) { 2333 printk(KERN_ERR 2334 "kmem_cache_create: couldn't create cache %s.\n", name); 2335 kmem_cache_free(&cache_cache, cachep); 2336 cachep = NULL; 2337 goto oops; 2338 } 2339 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2340 + sizeof(struct slab), align); 2341 2342 /* 2343 * If the slab has been placed off-slab, and we have enough space then 2344 * move it on-slab. This is at the expense of any extra colouring. 2345 */ 2346 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2347 flags &= ~CFLGS_OFF_SLAB; 2348 left_over -= slab_size; 2349 } 2350 2351 if (flags & CFLGS_OFF_SLAB) { 2352 /* really off slab. No need for manual alignment */ 2353 slab_size = 2354 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2355 } 2356 2357 cachep->colour_off = cache_line_size(); 2358 /* Offset must be a multiple of the alignment. 
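	 *
	 * The left over bytes of a slab are used for cache colouring:
	 * successive slabs start their objects at offset 0, colour_off,
	 * 2 * colour_off, ... and wrap after cachep->colour steps (see
	 * cache_grow()).  As an example, assuming a 64 byte cache line, an
	 * alignment no larger than that and 192 bytes left over, colour_off
	 * is 64 and colour is 3, so the first object of consecutive slabs
	 * lands on a different cache line.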
*/ 2359 if (cachep->colour_off < align) 2360 cachep->colour_off = align; 2361 cachep->colour = left_over / cachep->colour_off; 2362 cachep->slab_size = slab_size; 2363 cachep->flags = flags; 2364 cachep->gfpflags = 0; 2365 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2366 cachep->gfpflags |= GFP_DMA; 2367 cachep->buffer_size = size; 2368 cachep->reciprocal_buffer_size = reciprocal_value(size); 2369 2370 if (flags & CFLGS_OFF_SLAB) { 2371 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2372 /* 2373 * This is a possibility for one of the malloc_sizes caches. 2374 * But since we go off slab only for object size greater than 2375 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2376 * this should not happen at all. 2377 * But leave a BUG_ON for some lucky dude. 2378 */ 2379 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); 2380 } 2381 cachep->ctor = ctor; 2382 cachep->name = name; 2383 2384 if (setup_cpu_cache(cachep)) { 2385 __kmem_cache_destroy(cachep); 2386 cachep = NULL; 2387 goto oops; 2388 } 2389 2390 /* cache setup completed, link it into the list */ 2391 list_add(&cachep->next, &cache_chain); 2392 oops: 2393 if (!cachep && (flags & SLAB_PANIC)) 2394 panic("kmem_cache_create(): failed to create slab `%s'\n", 2395 name); 2396 mutex_unlock(&cache_chain_mutex); 2397 put_online_cpus(); 2398 return cachep; 2399 } 2400 EXPORT_SYMBOL(kmem_cache_create); 2401 2402 #if DEBUG 2403 static void check_irq_off(void) 2404 { 2405 BUG_ON(!irqs_disabled()); 2406 } 2407 2408 static void check_irq_on(void) 2409 { 2410 BUG_ON(irqs_disabled()); 2411 } 2412 2413 static void check_spinlock_acquired(struct kmem_cache *cachep) 2414 { 2415 #ifdef CONFIG_SMP 2416 check_irq_off(); 2417 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2418 #endif 2419 } 2420 2421 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2422 { 2423 #ifdef CONFIG_SMP 2424 check_irq_off(); 2425 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2426 #endif 2427 } 2428 2429 #else 2430 #define check_irq_off() do { } while(0) 2431 #define check_irq_on() do { } while(0) 2432 #define check_spinlock_acquired(x) do { } while(0) 2433 #define check_spinlock_acquired_node(x, y) do { } while(0) 2434 #endif 2435 2436 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2437 struct array_cache *ac, 2438 int force, int node); 2439 2440 static void do_drain(void *arg) 2441 { 2442 struct kmem_cache *cachep = arg; 2443 struct array_cache *ac; 2444 int node = numa_node_id(); 2445 2446 check_irq_off(); 2447 ac = cpu_cache_get(cachep); 2448 spin_lock(&cachep->nodelists[node]->list_lock); 2449 free_block(cachep, ac->entry, ac->avail, node); 2450 spin_unlock(&cachep->nodelists[node]->list_lock); 2451 ac->avail = 0; 2452 } 2453 2454 static void drain_cpu_caches(struct kmem_cache *cachep) 2455 { 2456 struct kmem_list3 *l3; 2457 int node; 2458 2459 on_each_cpu(do_drain, cachep, 1, 1); 2460 check_irq_on(); 2461 for_each_online_node(node) { 2462 l3 = cachep->nodelists[node]; 2463 if (l3 && l3->alien) 2464 drain_alien_cache(cachep, l3->alien); 2465 } 2466 2467 for_each_online_node(node) { 2468 l3 = cachep->nodelists[node]; 2469 if (l3) 2470 drain_array(cachep, l3, l3->shared, 1, node); 2471 } 2472 } 2473 2474 /* 2475 * Remove slabs from the list of free slabs. 2476 * Specify the number of slabs to drain in tofree. 2477 * 2478 * Returns the actual number of slabs released. 
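 *
 * Callers choose tofree according to how aggressive they want to be.
 * __cache_shrink() releases everything on the free list:
 *
 *	drain_freelist(cachep, l3, l3->free_objects);
 *
 * while cache_reap() only asks for a small fraction of l3->free_limit
 * per invocation.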
2479 */ 2480 static int drain_freelist(struct kmem_cache *cache, 2481 struct kmem_list3 *l3, int tofree) 2482 { 2483 struct list_head *p; 2484 int nr_freed; 2485 struct slab *slabp; 2486 2487 nr_freed = 0; 2488 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2489 2490 spin_lock_irq(&l3->list_lock); 2491 p = l3->slabs_free.prev; 2492 if (p == &l3->slabs_free) { 2493 spin_unlock_irq(&l3->list_lock); 2494 goto out; 2495 } 2496 2497 slabp = list_entry(p, struct slab, list); 2498 #if DEBUG 2499 BUG_ON(slabp->inuse); 2500 #endif 2501 list_del(&slabp->list); 2502 /* 2503 * Safe to drop the lock. The slab is no longer linked 2504 * to the cache. 2505 */ 2506 l3->free_objects -= cache->num; 2507 spin_unlock_irq(&l3->list_lock); 2508 slab_destroy(cache, slabp); 2509 nr_freed++; 2510 } 2511 out: 2512 return nr_freed; 2513 } 2514 2515 /* Called with cache_chain_mutex held to protect against cpu hotplug */ 2516 static int __cache_shrink(struct kmem_cache *cachep) 2517 { 2518 int ret = 0, i = 0; 2519 struct kmem_list3 *l3; 2520 2521 drain_cpu_caches(cachep); 2522 2523 check_irq_on(); 2524 for_each_online_node(i) { 2525 l3 = cachep->nodelists[i]; 2526 if (!l3) 2527 continue; 2528 2529 drain_freelist(cachep, l3, l3->free_objects); 2530 2531 ret += !list_empty(&l3->slabs_full) || 2532 !list_empty(&l3->slabs_partial); 2533 } 2534 return (ret ? 1 : 0); 2535 } 2536 2537 /** 2538 * kmem_cache_shrink - Shrink a cache. 2539 * @cachep: The cache to shrink. 2540 * 2541 * Releases as many slabs as possible for a cache. 2542 * To help debugging, a zero exit status indicates all slabs were released. 2543 */ 2544 int kmem_cache_shrink(struct kmem_cache *cachep) 2545 { 2546 int ret; 2547 BUG_ON(!cachep || in_interrupt()); 2548 2549 get_online_cpus(); 2550 mutex_lock(&cache_chain_mutex); 2551 ret = __cache_shrink(cachep); 2552 mutex_unlock(&cache_chain_mutex); 2553 put_online_cpus(); 2554 return ret; 2555 } 2556 EXPORT_SYMBOL(kmem_cache_shrink); 2557 2558 /** 2559 * kmem_cache_destroy - delete a cache 2560 * @cachep: the cache to destroy 2561 * 2562 * Remove a &struct kmem_cache object from the slab cache. 2563 * 2564 * It is expected this function will be called by a module when it is 2565 * unloaded. This will remove the cache completely, and avoid a duplicate 2566 * cache being allocated each time a module is loaded and unloaded, if the 2567 * module doesn't have persistent in-kernel storage across loads and unloads. 2568 * 2569 * The cache must be empty before calling this function. 2570 * 2571 * The caller must guarantee that noone will allocate memory from the cache 2572 * during the kmem_cache_destroy(). 2573 */ 2574 void kmem_cache_destroy(struct kmem_cache *cachep) 2575 { 2576 BUG_ON(!cachep || in_interrupt()); 2577 2578 /* Find the cache in the chain of caches. */ 2579 get_online_cpus(); 2580 mutex_lock(&cache_chain_mutex); 2581 /* 2582 * the chain is never empty, cache_cache is never destroyed 2583 */ 2584 list_del(&cachep->next); 2585 if (__cache_shrink(cachep)) { 2586 slab_error(cachep, "Can't free all objects"); 2587 list_add(&cachep->next, &cache_chain); 2588 mutex_unlock(&cache_chain_mutex); 2589 put_online_cpus(); 2590 return; 2591 } 2592 2593 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2594 synchronize_rcu(); 2595 2596 __kmem_cache_destroy(cachep); 2597 mutex_unlock(&cache_chain_mutex); 2598 put_online_cpus(); 2599 } 2600 EXPORT_SYMBOL(kmem_cache_destroy); 2601 2602 /* 2603 * Get the memory for a slab management obj. 
2604 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2605 * always come from malloc_sizes caches. The slab descriptor cannot 2606 * come from the same cache which is getting created because, 2607 * when we are searching for an appropriate cache for these 2608 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2609 * If we are creating a malloc_sizes cache here it would not be visible to 2610 * kmem_find_general_cachep till the initialization is complete. 2611 * Hence we cannot have slabp_cache same as the original cache. 2612 */ 2613 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2614 int colour_off, gfp_t local_flags, 2615 int nodeid) 2616 { 2617 struct slab *slabp; 2618 2619 if (OFF_SLAB(cachep)) { 2620 /* Slab management obj is off-slab. */ 2621 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2622 local_flags & ~GFP_THISNODE, nodeid); 2623 if (!slabp) 2624 return NULL; 2625 } else { 2626 slabp = objp + colour_off; 2627 colour_off += cachep->slab_size; 2628 } 2629 slabp->inuse = 0; 2630 slabp->colouroff = colour_off; 2631 slabp->s_mem = objp + colour_off; 2632 slabp->nodeid = nodeid; 2633 slabp->free = 0; 2634 return slabp; 2635 } 2636 2637 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2638 { 2639 return (kmem_bufctl_t *) (slabp + 1); 2640 } 2641 2642 static void cache_init_objs(struct kmem_cache *cachep, 2643 struct slab *slabp) 2644 { 2645 int i; 2646 2647 for (i = 0; i < cachep->num; i++) { 2648 void *objp = index_to_obj(cachep, slabp, i); 2649 #if DEBUG 2650 /* need to poison the objs? */ 2651 if (cachep->flags & SLAB_POISON) 2652 poison_obj(cachep, objp, POISON_FREE); 2653 if (cachep->flags & SLAB_STORE_USER) 2654 *dbg_userword(cachep, objp) = NULL; 2655 2656 if (cachep->flags & SLAB_RED_ZONE) { 2657 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2658 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2659 } 2660 /* 2661 * Constructors are not allowed to allocate memory from the same 2662 * cache which they are a constructor for. Otherwise, deadlock. 2663 * They must also be threaded. 
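		 *
		 * A typical constructor only sets up invariant state, e.g.
		 * (purely illustrative, "struct foo" is a made-up example):
		 *
		 *	static void foo_ctor(struct kmem_cache *cachep, void *obj)
		 *	{
		 *		struct foo *f = obj;
		 *
		 *		spin_lock_init(&f->lock);
		 *		INIT_LIST_HEAD(&f->list);
		 *	}
		 *
		 * and must never allocate from the cache it belongs to.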
2664 */ 2665 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2666 cachep->ctor(cachep, objp + obj_offset(cachep)); 2667 2668 if (cachep->flags & SLAB_RED_ZONE) { 2669 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2670 slab_error(cachep, "constructor overwrote the" 2671 " end of an object"); 2672 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2673 slab_error(cachep, "constructor overwrote the" 2674 " start of an object"); 2675 } 2676 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2677 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2678 kernel_map_pages(virt_to_page(objp), 2679 cachep->buffer_size / PAGE_SIZE, 0); 2680 #else 2681 if (cachep->ctor) 2682 cachep->ctor(cachep, objp); 2683 #endif 2684 slab_bufctl(slabp)[i] = i + 1; 2685 } 2686 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2687 } 2688 2689 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2690 { 2691 if (CONFIG_ZONE_DMA_FLAG) { 2692 if (flags & GFP_DMA) 2693 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2694 else 2695 BUG_ON(cachep->gfpflags & GFP_DMA); 2696 } 2697 } 2698 2699 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2700 int nodeid) 2701 { 2702 void *objp = index_to_obj(cachep, slabp, slabp->free); 2703 kmem_bufctl_t next; 2704 2705 slabp->inuse++; 2706 next = slab_bufctl(slabp)[slabp->free]; 2707 #if DEBUG 2708 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2709 WARN_ON(slabp->nodeid != nodeid); 2710 #endif 2711 slabp->free = next; 2712 2713 return objp; 2714 } 2715 2716 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2717 void *objp, int nodeid) 2718 { 2719 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2720 2721 #if DEBUG 2722 /* Verify that the slab belongs to the intended node */ 2723 WARN_ON(slabp->nodeid != nodeid); 2724 2725 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2726 printk(KERN_ERR "slab: double free detected in cache " 2727 "'%s', objp %p\n", cachep->name, objp); 2728 BUG(); 2729 } 2730 #endif 2731 slab_bufctl(slabp)[objnr] = slabp->free; 2732 slabp->free = objnr; 2733 slabp->inuse--; 2734 } 2735 2736 /* 2737 * Map pages beginning at addr to the given cache and slab. This is required 2738 * for the slab allocator to be able to lookup the cache and slab of a 2739 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2740 */ 2741 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2742 void *addr) 2743 { 2744 int nr_pages; 2745 struct page *page; 2746 2747 page = virt_to_page(addr); 2748 2749 nr_pages = 1; 2750 if (likely(!PageCompound(page))) 2751 nr_pages <<= cache->gfporder; 2752 2753 do { 2754 page_set_cache(page, cache); 2755 page_set_slab(page, slab); 2756 page++; 2757 } while (--nr_pages); 2758 } 2759 2760 /* 2761 * Grow (by 1) the number of slabs within a cache. This is called by 2762 * kmem_cache_alloc() when there are no active objs left in a cache. 2763 */ 2764 static int cache_grow(struct kmem_cache *cachep, 2765 gfp_t flags, int nodeid, void *objp) 2766 { 2767 struct slab *slabp; 2768 size_t offset; 2769 gfp_t local_flags; 2770 struct kmem_list3 *l3; 2771 2772 /* 2773 * Be lazy and only check for valid flags here, keeping it out of the 2774 * critical path in kmem_cache_alloc(). 
2775 */ 2776 BUG_ON(flags & GFP_SLAB_BUG_MASK); 2777 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 2778 2779 /* Take the l3 list lock to change the colour_next on this node */ 2780 check_irq_off(); 2781 l3 = cachep->nodelists[nodeid]; 2782 spin_lock(&l3->list_lock); 2783 2784 /* Get colour for the slab, and cal the next value. */ 2785 offset = l3->colour_next; 2786 l3->colour_next++; 2787 if (l3->colour_next >= cachep->colour) 2788 l3->colour_next = 0; 2789 spin_unlock(&l3->list_lock); 2790 2791 offset *= cachep->colour_off; 2792 2793 if (local_flags & __GFP_WAIT) 2794 local_irq_enable(); 2795 2796 /* 2797 * The test for missing atomic flag is performed here, rather than 2798 * the more obvious place, simply to reduce the critical path length 2799 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2800 * will eventually be caught here (where it matters). 2801 */ 2802 kmem_flagcheck(cachep, flags); 2803 2804 /* 2805 * Get mem for the objs. Attempt to allocate a physical page from 2806 * 'nodeid'. 2807 */ 2808 if (!objp) 2809 objp = kmem_getpages(cachep, local_flags, nodeid); 2810 if (!objp) 2811 goto failed; 2812 2813 /* Get slab management. */ 2814 slabp = alloc_slabmgmt(cachep, objp, offset, 2815 local_flags & ~GFP_CONSTRAINT_MASK, nodeid); 2816 if (!slabp) 2817 goto opps1; 2818 2819 slab_map_pages(cachep, slabp, objp); 2820 2821 cache_init_objs(cachep, slabp); 2822 2823 if (local_flags & __GFP_WAIT) 2824 local_irq_disable(); 2825 check_irq_off(); 2826 spin_lock(&l3->list_lock); 2827 2828 /* Make slab active. */ 2829 list_add_tail(&slabp->list, &(l3->slabs_free)); 2830 STATS_INC_GROWN(cachep); 2831 l3->free_objects += cachep->num; 2832 spin_unlock(&l3->list_lock); 2833 return 1; 2834 opps1: 2835 kmem_freepages(cachep, objp); 2836 failed: 2837 if (local_flags & __GFP_WAIT) 2838 local_irq_disable(); 2839 return 0; 2840 } 2841 2842 #if DEBUG 2843 2844 /* 2845 * Perform extra freeing checks: 2846 * - detect bad pointers. 2847 * - POISON/RED_ZONE checking 2848 */ 2849 static void kfree_debugcheck(const void *objp) 2850 { 2851 if (!virt_addr_valid(objp)) { 2852 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2853 (unsigned long)objp); 2854 BUG(); 2855 } 2856 } 2857 2858 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2859 { 2860 unsigned long long redzone1, redzone2; 2861 2862 redzone1 = *dbg_redzone1(cache, obj); 2863 redzone2 = *dbg_redzone2(cache, obj); 2864 2865 /* 2866 * Redzone is ok. 
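	 *
	 * On the free path both red zone words of a live object are expected
	 * to be RED_ACTIVE.  Both words still being RED_INACTIVE means the
	 * object was never handed out again since its last free, i.e. a
	 * double free; any other combination means memory just outside the
	 * object was overwritten.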
2867 */ 2868 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2869 return; 2870 2871 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2872 slab_error(cache, "double free detected"); 2873 else 2874 slab_error(cache, "memory outside object was overwritten"); 2875 2876 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2877 obj, redzone1, redzone2); 2878 } 2879 2880 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2881 void *caller) 2882 { 2883 struct page *page; 2884 unsigned int objnr; 2885 struct slab *slabp; 2886 2887 BUG_ON(virt_to_cache(objp) != cachep); 2888 2889 objp -= obj_offset(cachep); 2890 kfree_debugcheck(objp); 2891 page = virt_to_head_page(objp); 2892 2893 slabp = page_get_slab(page); 2894 2895 if (cachep->flags & SLAB_RED_ZONE) { 2896 verify_redzone_free(cachep, objp); 2897 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2898 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2899 } 2900 if (cachep->flags & SLAB_STORE_USER) 2901 *dbg_userword(cachep, objp) = caller; 2902 2903 objnr = obj_to_index(cachep, slabp, objp); 2904 2905 BUG_ON(objnr >= cachep->num); 2906 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2907 2908 #ifdef CONFIG_DEBUG_SLAB_LEAK 2909 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2910 #endif 2911 if (cachep->flags & SLAB_POISON) { 2912 #ifdef CONFIG_DEBUG_PAGEALLOC 2913 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2914 store_stackinfo(cachep, objp, (unsigned long)caller); 2915 kernel_map_pages(virt_to_page(objp), 2916 cachep->buffer_size / PAGE_SIZE, 0); 2917 } else { 2918 poison_obj(cachep, objp, POISON_FREE); 2919 } 2920 #else 2921 poison_obj(cachep, objp, POISON_FREE); 2922 #endif 2923 } 2924 return objp; 2925 } 2926 2927 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2928 { 2929 kmem_bufctl_t i; 2930 int entries = 0; 2931 2932 /* Check slab's freelist to see if this obj is there. */ 2933 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2934 entries++; 2935 if (entries > cachep->num || i >= cachep->num) 2936 goto bad; 2937 } 2938 if (entries != cachep->num - slabp->inuse) { 2939 bad: 2940 printk(KERN_ERR "slab: Internal list corruption detected in " 2941 "cache '%s'(%d), slabp %p(%d). Hexdump:\n", 2942 cachep->name, cachep->num, slabp, slabp->inuse); 2943 for (i = 0; 2944 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2945 i++) { 2946 if (i % 16 == 0) 2947 printk("\n%03x:", i); 2948 printk(" %02x", ((unsigned char *)slabp)[i]); 2949 } 2950 printk("\n"); 2951 BUG(); 2952 } 2953 } 2954 #else 2955 #define kfree_debugcheck(x) do { } while(0) 2956 #define cache_free_debugcheck(x,objp,z) (objp) 2957 #define check_slabp(x,y) do { } while(0) 2958 #endif 2959 2960 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2961 { 2962 int batchcount; 2963 struct kmem_list3 *l3; 2964 struct array_cache *ac; 2965 int node; 2966 2967 retry: 2968 check_irq_off(); 2969 node = numa_node_id(); 2970 ac = cpu_cache_get(cachep); 2971 batchcount = ac->batchcount; 2972 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2973 /* 2974 * If there was little recent activity on this cache, then 2975 * perform only a partial refill. Otherwise we could generate 2976 * refill bouncing. 
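		 *
		 * A cold cache is therefore refilled with at most
		 * BATCHREFILL_LIMIT objects instead of the full
		 * ac->batchcount.  The refill below then tries, in order, the
		 * per-node shared array, the partial slabs, the free slabs
		 * and finally cache_grow().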
2977 */ 2978 batchcount = BATCHREFILL_LIMIT; 2979 } 2980 l3 = cachep->nodelists[node]; 2981 2982 BUG_ON(ac->avail > 0 || !l3); 2983 spin_lock(&l3->list_lock); 2984 2985 /* See if we can refill from the shared array */ 2986 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2987 goto alloc_done; 2988 2989 while (batchcount > 0) { 2990 struct list_head *entry; 2991 struct slab *slabp; 2992 /* Get slab alloc is to come from. */ 2993 entry = l3->slabs_partial.next; 2994 if (entry == &l3->slabs_partial) { 2995 l3->free_touched = 1; 2996 entry = l3->slabs_free.next; 2997 if (entry == &l3->slabs_free) 2998 goto must_grow; 2999 } 3000 3001 slabp = list_entry(entry, struct slab, list); 3002 check_slabp(cachep, slabp); 3003 check_spinlock_acquired(cachep); 3004 3005 /* 3006 * The slab was either on partial or free list so 3007 * there must be at least one object available for 3008 * allocation. 3009 */ 3010 BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num); 3011 3012 while (slabp->inuse < cachep->num && batchcount--) { 3013 STATS_INC_ALLOCED(cachep); 3014 STATS_INC_ACTIVE(cachep); 3015 STATS_SET_HIGH(cachep); 3016 3017 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 3018 node); 3019 } 3020 check_slabp(cachep, slabp); 3021 3022 /* move slabp to correct slabp list: */ 3023 list_del(&slabp->list); 3024 if (slabp->free == BUFCTL_END) 3025 list_add(&slabp->list, &l3->slabs_full); 3026 else 3027 list_add(&slabp->list, &l3->slabs_partial); 3028 } 3029 3030 must_grow: 3031 l3->free_objects -= ac->avail; 3032 alloc_done: 3033 spin_unlock(&l3->list_lock); 3034 3035 if (unlikely(!ac->avail)) { 3036 int x; 3037 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 3038 3039 /* cache_grow can reenable interrupts, then ac could change. */ 3040 ac = cpu_cache_get(cachep); 3041 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3042 return NULL; 3043 3044 if (!ac->avail) /* objects refilled by interrupt? 
*/ 3045 goto retry; 3046 } 3047 ac->touched = 1; 3048 return ac->entry[--ac->avail]; 3049 } 3050 3051 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3052 gfp_t flags) 3053 { 3054 might_sleep_if(flags & __GFP_WAIT); 3055 #if DEBUG 3056 kmem_flagcheck(cachep, flags); 3057 #endif 3058 } 3059 3060 #if DEBUG 3061 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3062 gfp_t flags, void *objp, void *caller) 3063 { 3064 if (!objp) 3065 return objp; 3066 if (cachep->flags & SLAB_POISON) { 3067 #ifdef CONFIG_DEBUG_PAGEALLOC 3068 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3069 kernel_map_pages(virt_to_page(objp), 3070 cachep->buffer_size / PAGE_SIZE, 1); 3071 else 3072 check_poison_obj(cachep, objp); 3073 #else 3074 check_poison_obj(cachep, objp); 3075 #endif 3076 poison_obj(cachep, objp, POISON_INUSE); 3077 } 3078 if (cachep->flags & SLAB_STORE_USER) 3079 *dbg_userword(cachep, objp) = caller; 3080 3081 if (cachep->flags & SLAB_RED_ZONE) { 3082 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3083 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3084 slab_error(cachep, "double free, or memory outside" 3085 " object was overwritten"); 3086 printk(KERN_ERR 3087 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3088 objp, *dbg_redzone1(cachep, objp), 3089 *dbg_redzone2(cachep, objp)); 3090 } 3091 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3092 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3093 } 3094 #ifdef CONFIG_DEBUG_SLAB_LEAK 3095 { 3096 struct slab *slabp; 3097 unsigned objnr; 3098 3099 slabp = page_get_slab(virt_to_head_page(objp)); 3100 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3101 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3102 } 3103 #endif 3104 objp += obj_offset(cachep); 3105 if (cachep->ctor && cachep->flags & SLAB_POISON) 3106 cachep->ctor(cachep, objp); 3107 #if ARCH_SLAB_MINALIGN 3108 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3109 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3110 objp, ARCH_SLAB_MINALIGN); 3111 } 3112 #endif 3113 return objp; 3114 } 3115 #else 3116 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3117 #endif 3118 3119 #ifdef CONFIG_FAILSLAB 3120 3121 static struct failslab_attr { 3122 3123 struct fault_attr attr; 3124 3125 u32 ignore_gfp_wait; 3126 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3127 struct dentry *ignore_gfp_wait_file; 3128 #endif 3129 3130 } failslab = { 3131 .attr = FAULT_ATTR_INITIALIZER, 3132 .ignore_gfp_wait = 1, 3133 }; 3134 3135 static int __init setup_failslab(char *str) 3136 { 3137 return setup_fault_attr(&failslab.attr, str); 3138 } 3139 __setup("failslab=", setup_failslab); 3140 3141 static int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3142 { 3143 if (cachep == &cache_cache) 3144 return 0; 3145 if (flags & __GFP_NOFAIL) 3146 return 0; 3147 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) 3148 return 0; 3149 3150 return should_fail(&failslab.attr, obj_size(cachep)); 3151 } 3152 3153 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3154 3155 static int __init failslab_debugfs(void) 3156 { 3157 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3158 struct dentry *dir; 3159 int err; 3160 3161 err = init_fault_attr_dentries(&failslab.attr, "failslab"); 3162 if (err) 3163 return err; 3164 dir = failslab.attr.dentries.dir; 3165 3166 failslab.ignore_gfp_wait_file = 3167 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3168 &failslab.ignore_gfp_wait); 3169 3170 if (!failslab.ignore_gfp_wait_file) { 3171 err = -ENOMEM; 3172 
debugfs_remove(failslab.ignore_gfp_wait_file); 3173 cleanup_fault_attr_dentries(&failslab.attr); 3174 } 3175 3176 return err; 3177 } 3178 3179 late_initcall(failslab_debugfs); 3180 3181 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3182 3183 #else /* CONFIG_FAILSLAB */ 3184 3185 static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3186 { 3187 return 0; 3188 } 3189 3190 #endif /* CONFIG_FAILSLAB */ 3191 3192 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3193 { 3194 void *objp; 3195 struct array_cache *ac; 3196 3197 check_irq_off(); 3198 3199 ac = cpu_cache_get(cachep); 3200 if (likely(ac->avail)) { 3201 STATS_INC_ALLOCHIT(cachep); 3202 ac->touched = 1; 3203 objp = ac->entry[--ac->avail]; 3204 } else { 3205 STATS_INC_ALLOCMISS(cachep); 3206 objp = cache_alloc_refill(cachep, flags); 3207 } 3208 return objp; 3209 } 3210 3211 #ifdef CONFIG_NUMA 3212 /* 3213 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3214 * 3215 * If we are in_interrupt, then process context, including cpusets and 3216 * mempolicy, may not apply and should not be used for allocation policy. 3217 */ 3218 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3219 { 3220 int nid_alloc, nid_here; 3221 3222 if (in_interrupt() || (flags & __GFP_THISNODE)) 3223 return NULL; 3224 nid_alloc = nid_here = numa_node_id(); 3225 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3226 nid_alloc = cpuset_mem_spread_node(); 3227 else if (current->mempolicy) 3228 nid_alloc = slab_node(current->mempolicy); 3229 if (nid_alloc != nid_here) 3230 return ____cache_alloc_node(cachep, flags, nid_alloc); 3231 return NULL; 3232 } 3233 3234 /* 3235 * Fallback function if there was no memory available and no objects on a 3236 * certain node and fall back is permitted. First we scan all the 3237 * available nodelists for available objects. If that fails then we 3238 * perform an allocation without specifying a node. This allows the page 3239 * allocator to do its reclaim / fallback magic. We then insert the 3240 * slab into the proper nodelist and then allocate from it. 3241 */ 3242 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3243 { 3244 struct zonelist *zonelist; 3245 gfp_t local_flags; 3246 struct zone **z; 3247 void *obj = NULL; 3248 int nid; 3249 3250 if (flags & __GFP_THISNODE) 3251 return NULL; 3252 3253 zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3254 ->node_zonelists[gfp_zone(flags)]; 3255 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 3256 3257 retry: 3258 /* 3259 * Look through allowed nodes for objects available 3260 * from existing per node queues. 3261 */ 3262 for (z = zonelist->zones; *z && !obj; z++) { 3263 nid = zone_to_nid(*z); 3264 3265 if (cpuset_zone_allowed_hardwall(*z, flags) && 3266 cache->nodelists[nid] && 3267 cache->nodelists[nid]->free_objects) 3268 obj = ____cache_alloc_node(cache, 3269 flags | GFP_THISNODE, nid); 3270 } 3271 3272 if (!obj) { 3273 /* 3274 * This allocation will be performed within the constraints 3275 * of the current cpuset / memory policy requirements. 3276 * We may trigger various forms of reclaim on the allowed 3277 * set and go into memory reserves if necessary. 
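		 *
		 * The page obtained here may live on any node the current
		 * context allows.  cache_grow() below inserts it into that
		 * node's slab lists and we retry the node-local allocation,
		 * since another cpu may grab the fresh objects before we do
		 * (no locks are held across the grow).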
3278 */ 3279 if (local_flags & __GFP_WAIT) 3280 local_irq_enable(); 3281 kmem_flagcheck(cache, flags); 3282 obj = kmem_getpages(cache, local_flags, -1); 3283 if (local_flags & __GFP_WAIT) 3284 local_irq_disable(); 3285 if (obj) { 3286 /* 3287 * Insert into the appropriate per node queues 3288 */ 3289 nid = page_to_nid(virt_to_page(obj)); 3290 if (cache_grow(cache, flags, nid, obj)) { 3291 obj = ____cache_alloc_node(cache, 3292 flags | GFP_THISNODE, nid); 3293 if (!obj) 3294 /* 3295 * Another processor may allocate the 3296 * objects in the slab since we are 3297 * not holding any locks. 3298 */ 3299 goto retry; 3300 } else { 3301 /* cache_grow already freed obj */ 3302 obj = NULL; 3303 } 3304 } 3305 } 3306 return obj; 3307 } 3308 3309 /* 3310 * A interface to enable slab creation on nodeid 3311 */ 3312 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3313 int nodeid) 3314 { 3315 struct list_head *entry; 3316 struct slab *slabp; 3317 struct kmem_list3 *l3; 3318 void *obj; 3319 int x; 3320 3321 l3 = cachep->nodelists[nodeid]; 3322 BUG_ON(!l3); 3323 3324 retry: 3325 check_irq_off(); 3326 spin_lock(&l3->list_lock); 3327 entry = l3->slabs_partial.next; 3328 if (entry == &l3->slabs_partial) { 3329 l3->free_touched = 1; 3330 entry = l3->slabs_free.next; 3331 if (entry == &l3->slabs_free) 3332 goto must_grow; 3333 } 3334 3335 slabp = list_entry(entry, struct slab, list); 3336 check_spinlock_acquired_node(cachep, nodeid); 3337 check_slabp(cachep, slabp); 3338 3339 STATS_INC_NODEALLOCS(cachep); 3340 STATS_INC_ACTIVE(cachep); 3341 STATS_SET_HIGH(cachep); 3342 3343 BUG_ON(slabp->inuse == cachep->num); 3344 3345 obj = slab_get_obj(cachep, slabp, nodeid); 3346 check_slabp(cachep, slabp); 3347 l3->free_objects--; 3348 /* move slabp to correct slabp list: */ 3349 list_del(&slabp->list); 3350 3351 if (slabp->free == BUFCTL_END) 3352 list_add(&slabp->list, &l3->slabs_full); 3353 else 3354 list_add(&slabp->list, &l3->slabs_partial); 3355 3356 spin_unlock(&l3->list_lock); 3357 goto done; 3358 3359 must_grow: 3360 spin_unlock(&l3->list_lock); 3361 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3362 if (x) 3363 goto retry; 3364 3365 return fallback_alloc(cachep, flags); 3366 3367 done: 3368 return obj; 3369 } 3370 3371 /** 3372 * kmem_cache_alloc_node - Allocate an object on the specified node 3373 * @cachep: The cache to allocate from. 3374 * @flags: See kmalloc(). 3375 * @nodeid: node number of the target node. 3376 * @caller: return address of caller, used for debug information 3377 * 3378 * Identical to kmem_cache_alloc but it will allocate memory on the given 3379 * node, which can improve the performance for cpu bound structures. 3380 * 3381 * Fallback to other node is possible if __GFP_THISNODE is not set. 3382 */ 3383 static __always_inline void * 3384 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3385 void *caller) 3386 { 3387 unsigned long save_flags; 3388 void *ptr; 3389 3390 if (should_failslab(cachep, flags)) 3391 return NULL; 3392 3393 cache_alloc_debugcheck_before(cachep, flags); 3394 local_irq_save(save_flags); 3395 3396 if (unlikely(nodeid == -1)) 3397 nodeid = numa_node_id(); 3398 3399 if (unlikely(!cachep->nodelists[nodeid])) { 3400 /* Node not bootstrapped yet */ 3401 ptr = fallback_alloc(cachep, flags); 3402 goto out; 3403 } 3404 3405 if (nodeid == numa_node_id()) { 3406 /* 3407 * Use the locally cached objects if possible. 3408 * However ____cache_alloc does not allow fallback 3409 * to other nodes. 
It may fail while we still have 3410 * objects on other nodes available. 3411 */ 3412 ptr = ____cache_alloc(cachep, flags); 3413 if (ptr) 3414 goto out; 3415 } 3416 /* ___cache_alloc_node can fall back to other nodes */ 3417 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3418 out: 3419 local_irq_restore(save_flags); 3420 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3421 3422 if (unlikely((flags & __GFP_ZERO) && ptr)) 3423 memset(ptr, 0, obj_size(cachep)); 3424 3425 return ptr; 3426 } 3427 3428 static __always_inline void * 3429 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3430 { 3431 void *objp; 3432 3433 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { 3434 objp = alternate_node_alloc(cache, flags); 3435 if (objp) 3436 goto out; 3437 } 3438 objp = ____cache_alloc(cache, flags); 3439 3440 /* 3441 * We may just have run out of memory on the local node. 3442 * ____cache_alloc_node() knows how to locate memory on other nodes 3443 */ 3444 if (!objp) 3445 objp = ____cache_alloc_node(cache, flags, numa_node_id()); 3446 3447 out: 3448 return objp; 3449 } 3450 #else 3451 3452 static __always_inline void * 3453 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3454 { 3455 return ____cache_alloc(cachep, flags); 3456 } 3457 3458 #endif /* CONFIG_NUMA */ 3459 3460 static __always_inline void * 3461 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) 3462 { 3463 unsigned long save_flags; 3464 void *objp; 3465 3466 if (should_failslab(cachep, flags)) 3467 return NULL; 3468 3469 cache_alloc_debugcheck_before(cachep, flags); 3470 local_irq_save(save_flags); 3471 objp = __do_cache_alloc(cachep, flags); 3472 local_irq_restore(save_flags); 3473 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3474 prefetchw(objp); 3475 3476 if (unlikely((flags & __GFP_ZERO) && objp)) 3477 memset(objp, 0, obj_size(cachep)); 3478 3479 return objp; 3480 } 3481 3482 /* 3483 * Caller needs to acquire correct kmem_list's list_lock 3484 */ 3485 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3486 int node) 3487 { 3488 int i; 3489 struct kmem_list3 *l3; 3490 3491 for (i = 0; i < nr_objects; i++) { 3492 void *objp = objpp[i]; 3493 struct slab *slabp; 3494 3495 slabp = virt_to_slab(objp); 3496 l3 = cachep->nodelists[node]; 3497 list_del(&slabp->list); 3498 check_spinlock_acquired_node(cachep, node); 3499 check_slabp(cachep, slabp); 3500 slab_put_obj(cachep, slabp, objp, node); 3501 STATS_DEC_ACTIVE(cachep); 3502 l3->free_objects++; 3503 check_slabp(cachep, slabp); 3504 3505 /* fixup slab chains */ 3506 if (slabp->inuse == 0) { 3507 if (l3->free_objects > l3->free_limit) { 3508 l3->free_objects -= cachep->num; 3509 /* No need to drop any previously held 3510 * lock here, even if we have a off-slab slab 3511 * descriptor it is guaranteed to come from 3512 * a different cache, refer to comments before 3513 * alloc_slabmgmt. 3514 */ 3515 slab_destroy(cachep, slabp); 3516 } else { 3517 list_add(&slabp->list, &l3->slabs_free); 3518 } 3519 } else { 3520 /* Unconditionally move a slab to the end of the 3521 * partial list on free - maximum time for the 3522 * other objects to be freed, too. 
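			 *
			 * Completely free slabs are handled above: they are
			 * destroyed outright once l3->free_objects exceeds
			 * l3->free_limit, which alloc_kmemlist() sets to
			 *
			 *	(1 + nr_cpus_node(node)) * batchcount + num
			 *
			 * so a node keeps roughly one batch of spare objects
			 * per cpu before giving pages back.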
3523 */ 3524 list_add_tail(&slabp->list, &l3->slabs_partial); 3525 } 3526 } 3527 } 3528 3529 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3530 { 3531 int batchcount; 3532 struct kmem_list3 *l3; 3533 int node = numa_node_id(); 3534 3535 batchcount = ac->batchcount; 3536 #if DEBUG 3537 BUG_ON(!batchcount || batchcount > ac->avail); 3538 #endif 3539 check_irq_off(); 3540 l3 = cachep->nodelists[node]; 3541 spin_lock(&l3->list_lock); 3542 if (l3->shared) { 3543 struct array_cache *shared_array = l3->shared; 3544 int max = shared_array->limit - shared_array->avail; 3545 if (max) { 3546 if (batchcount > max) 3547 batchcount = max; 3548 memcpy(&(shared_array->entry[shared_array->avail]), 3549 ac->entry, sizeof(void *) * batchcount); 3550 shared_array->avail += batchcount; 3551 goto free_done; 3552 } 3553 } 3554 3555 free_block(cachep, ac->entry, batchcount, node); 3556 free_done: 3557 #if STATS 3558 { 3559 int i = 0; 3560 struct list_head *p; 3561 3562 p = l3->slabs_free.next; 3563 while (p != &(l3->slabs_free)) { 3564 struct slab *slabp; 3565 3566 slabp = list_entry(p, struct slab, list); 3567 BUG_ON(slabp->inuse); 3568 3569 i++; 3570 p = p->next; 3571 } 3572 STATS_SET_FREEABLE(cachep, i); 3573 } 3574 #endif 3575 spin_unlock(&l3->list_lock); 3576 ac->avail -= batchcount; 3577 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3578 } 3579 3580 /* 3581 * Release an obj back to its cache. If the obj has a constructed state, it must 3582 * be in this state _before_ it is released. Called with disabled ints. 3583 */ 3584 static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3585 { 3586 struct array_cache *ac = cpu_cache_get(cachep); 3587 3588 check_irq_off(); 3589 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3590 3591 /* 3592 * Skip calling cache_free_alien() when the platform is not numa. 3593 * This will avoid cache misses that happen while accessing slabp (which 3594 * is per page memory reference) to get nodeid. Instead use a global 3595 * variable to skip the call, which is mostly likely to be present in 3596 * the cache. 3597 */ 3598 if (numa_platform && cache_free_alien(cachep, objp)) 3599 return; 3600 3601 if (likely(ac->avail < ac->limit)) { 3602 STATS_INC_FREEHIT(cachep); 3603 ac->entry[ac->avail++] = objp; 3604 return; 3605 } else { 3606 STATS_INC_FREEMISS(cachep); 3607 cache_flusharray(cachep, ac); 3608 ac->entry[ac->avail++] = objp; 3609 } 3610 } 3611 3612 /** 3613 * kmem_cache_alloc - Allocate an object 3614 * @cachep: The cache to allocate from. 3615 * @flags: See kmalloc(). 3616 * 3617 * Allocate an object from this cache. The flags are only relevant 3618 * if the cache has no available objects. 3619 */ 3620 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3621 { 3622 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3623 } 3624 EXPORT_SYMBOL(kmem_cache_alloc); 3625 3626 /** 3627 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. 3628 * @cachep: the cache we're checking against 3629 * @ptr: pointer to validate 3630 * 3631 * This verifies that the untrusted pointer looks sane; 3632 * it is _not_ a guarantee that the pointer is actually 3633 * part of the slab cache in question, but it at least 3634 * validates that the pointer can be dereferenced and 3635 * looks half-way sane. 3636 * 3637 * Currently only used for dentry validation. 
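 *
 * A caller typically probes an untrusted pointer before touching it,
 * along the lines of (illustrative sketch only):
 *
 *	if (!kmem_ptr_validate(cachep, ptr))
 *		return NULL;
 *
 * and only dereferences ptr when this returns 1.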
3638 */ 3639 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3640 { 3641 unsigned long addr = (unsigned long)ptr; 3642 unsigned long min_addr = PAGE_OFFSET; 3643 unsigned long align_mask = BYTES_PER_WORD - 1; 3644 unsigned long size = cachep->buffer_size; 3645 struct page *page; 3646 3647 if (unlikely(addr < min_addr)) 3648 goto out; 3649 if (unlikely(addr > (unsigned long)high_memory - size)) 3650 goto out; 3651 if (unlikely(addr & align_mask)) 3652 goto out; 3653 if (unlikely(!kern_addr_valid(addr))) 3654 goto out; 3655 if (unlikely(!kern_addr_valid(addr + size - 1))) 3656 goto out; 3657 page = virt_to_page(ptr); 3658 if (unlikely(!PageSlab(page))) 3659 goto out; 3660 if (unlikely(page_get_cache(page) != cachep)) 3661 goto out; 3662 return 1; 3663 out: 3664 return 0; 3665 } 3666 3667 #ifdef CONFIG_NUMA 3668 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3669 { 3670 return __cache_alloc_node(cachep, flags, nodeid, 3671 __builtin_return_address(0)); 3672 } 3673 EXPORT_SYMBOL(kmem_cache_alloc_node); 3674 3675 static __always_inline void * 3676 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3677 { 3678 struct kmem_cache *cachep; 3679 3680 cachep = kmem_find_general_cachep(size, flags); 3681 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3682 return cachep; 3683 return kmem_cache_alloc_node(cachep, flags, node); 3684 } 3685 3686 #ifdef CONFIG_DEBUG_SLAB 3687 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3688 { 3689 return __do_kmalloc_node(size, flags, node, 3690 __builtin_return_address(0)); 3691 } 3692 EXPORT_SYMBOL(__kmalloc_node); 3693 3694 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3695 int node, void *caller) 3696 { 3697 return __do_kmalloc_node(size, flags, node, caller); 3698 } 3699 EXPORT_SYMBOL(__kmalloc_node_track_caller); 3700 #else 3701 void *__kmalloc_node(size_t size, gfp_t flags, int node) 3702 { 3703 return __do_kmalloc_node(size, flags, node, NULL); 3704 } 3705 EXPORT_SYMBOL(__kmalloc_node); 3706 #endif /* CONFIG_DEBUG_SLAB */ 3707 #endif /* CONFIG_NUMA */ 3708 3709 /** 3710 * __do_kmalloc - allocate memory 3711 * @size: how many bytes of memory are required. 3712 * @flags: the type of memory to allocate (see kmalloc). 3713 * @caller: function caller for debug tracking of the caller 3714 */ 3715 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3716 void *caller) 3717 { 3718 struct kmem_cache *cachep; 3719 3720 /* If you want to save a few bytes .text space: replace 3721 * __ with kmem_. 3722 * Then kmalloc uses the uninlined functions instead of the inline 3723 * functions. 3724 */ 3725 cachep = __find_general_cachep(size, flags); 3726 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3727 return cachep; 3728 return __cache_alloc(cachep, flags, caller); 3729 } 3730 3731 3732 #ifdef CONFIG_DEBUG_SLAB 3733 void *__kmalloc(size_t size, gfp_t flags) 3734 { 3735 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3736 } 3737 EXPORT_SYMBOL(__kmalloc); 3738 3739 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3740 { 3741 return __do_kmalloc(size, flags, caller); 3742 } 3743 EXPORT_SYMBOL(__kmalloc_track_caller); 3744 3745 #else 3746 void *__kmalloc(size_t size, gfp_t flags) 3747 { 3748 return __do_kmalloc(size, flags, NULL); 3749 } 3750 EXPORT_SYMBOL(__kmalloc); 3751 #endif 3752 3753 /** 3754 * kmem_cache_free - Deallocate an object 3755 * @cachep: The cache the allocation was from. 3756 * @objp: The previously allocated object. 
3757 * 3758 * Free an object which was previously allocated from this 3759 * cache. 3760 */ 3761 void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3762 { 3763 unsigned long flags; 3764 3765 local_irq_save(flags); 3766 debug_check_no_locks_freed(objp, obj_size(cachep)); 3767 __cache_free(cachep, objp); 3768 local_irq_restore(flags); 3769 } 3770 EXPORT_SYMBOL(kmem_cache_free); 3771 3772 /** 3773 * kfree - free previously allocated memory 3774 * @objp: pointer returned by kmalloc. 3775 * 3776 * If @objp is NULL, no operation is performed. 3777 * 3778 * Don't free memory not originally allocated by kmalloc() 3779 * or you will run into trouble. 3780 */ 3781 void kfree(const void *objp) 3782 { 3783 struct kmem_cache *c; 3784 unsigned long flags; 3785 3786 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3787 return; 3788 local_irq_save(flags); 3789 kfree_debugcheck(objp); 3790 c = virt_to_cache(objp); 3791 debug_check_no_locks_freed(objp, obj_size(c)); 3792 __cache_free(c, (void *)objp); 3793 local_irq_restore(flags); 3794 } 3795 EXPORT_SYMBOL(kfree); 3796 3797 unsigned int kmem_cache_size(struct kmem_cache *cachep) 3798 { 3799 return obj_size(cachep); 3800 } 3801 EXPORT_SYMBOL(kmem_cache_size); 3802 3803 const char *kmem_cache_name(struct kmem_cache *cachep) 3804 { 3805 return cachep->name; 3806 } 3807 EXPORT_SYMBOL_GPL(kmem_cache_name); 3808 3809 /* 3810 * This initializes kmem_list3 or resizes various caches for all nodes. 3811 */ 3812 static int alloc_kmemlist(struct kmem_cache *cachep) 3813 { 3814 int node; 3815 struct kmem_list3 *l3; 3816 struct array_cache *new_shared; 3817 struct array_cache **new_alien = NULL; 3818 3819 for_each_online_node(node) { 3820 3821 if (use_alien_caches) { 3822 new_alien = alloc_alien_cache(node, cachep->limit); 3823 if (!new_alien) 3824 goto fail; 3825 } 3826 3827 new_shared = NULL; 3828 if (cachep->shared) { 3829 new_shared = alloc_arraycache(node, 3830 cachep->shared*cachep->batchcount, 3831 0xbaadf00d); 3832 if (!new_shared) { 3833 free_alien_cache(new_alien); 3834 goto fail; 3835 } 3836 } 3837 3838 l3 = cachep->nodelists[node]; 3839 if (l3) { 3840 struct array_cache *shared = l3->shared; 3841 3842 spin_lock_irq(&l3->list_lock); 3843 3844 if (shared) 3845 free_block(cachep, shared->entry, 3846 shared->avail, node); 3847 3848 l3->shared = new_shared; 3849 if (!l3->alien) { 3850 l3->alien = new_alien; 3851 new_alien = NULL; 3852 } 3853 l3->free_limit = (1 + nr_cpus_node(node)) * 3854 cachep->batchcount + cachep->num; 3855 spin_unlock_irq(&l3->list_lock); 3856 kfree(shared); 3857 free_alien_cache(new_alien); 3858 continue; 3859 } 3860 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3861 if (!l3) { 3862 free_alien_cache(new_alien); 3863 kfree(new_shared); 3864 goto fail; 3865 } 3866 3867 kmem_list3_init(l3); 3868 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3869 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3870 l3->shared = new_shared; 3871 l3->alien = new_alien; 3872 l3->free_limit = (1 + nr_cpus_node(node)) * 3873 cachep->batchcount + cachep->num; 3874 cachep->nodelists[node] = l3; 3875 } 3876 return 0; 3877 3878 fail: 3879 if (!cachep->next.next) { 3880 /* Cache is not active yet. 
/*
 * This initializes kmem_list3 or resizes various caches for all nodes.
 */
static int alloc_kmemlist(struct kmem_cache *cachep)
{
	int node;
	struct kmem_list3 *l3;
	struct array_cache *new_shared;
	struct array_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
				0xbaadf00d);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}

		l3 = cachep->nodelists[node];
		if (l3) {
			struct array_cache *shared = l3->shared;

			spin_lock_irq(&l3->list_lock);

			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node);

			l3->shared = new_shared;
			if (!l3->alien) {
				l3->alien = new_alien;
				new_alien = NULL;
			}
			l3->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
		if (!l3) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}

		kmem_list3_init(l3);
		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
		l3->shared = new_shared;
		l3->alien = new_alien;
		l3->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->nodelists[node] = l3;
	}
	return 0;

fail:
	if (!cachep->next.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			if (cachep->nodelists[node]) {
				l3 = cachep->nodelists[node];

				kfree(l3->shared);
				free_alien_cache(l3->alien);
				kfree(l3);
				cachep->nodelists[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[NR_CPUS];
};

static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

/* Always called with the cache_chain_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared)
{
	struct ccupdate_struct *new;
	int i;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	for_each_online_cpu(i) {
		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
						batchcount);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
				kfree(new->new[i]);
			kfree(new);
			return -ENOMEM;
		}
	}
	new->cachep = cachep;

	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	for_each_online_cpu(i) {
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		kfree(ccold);
	}
	kfree(new);
	return alloc_kmemlist(cachep);
}
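/*
 * Worked example (illustrative, cache name made up): a request to retune
 * "files_cache" to limit=64, batchcount=32, shared=8 reaches
 * do_tune_cpucache(cachep, 64, 32, 8) above.  That first allocates a new
 * 64-entry array_cache for every online CPU, swaps them in with interrupts
 * off via on_each_cpu(do_ccupdate_local, ...), then flushes whatever
 * objects were left in the old arrays back to their node lists under the
 * l3 list_lock before freeing the old arrays, and finally lets
 * alloc_kmemlist() resize the shared/alien arrays to match.
 */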
/* Called with cache_chain_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep)
{
	int err;
	int limit, shared;

	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->buffer_size > 131072)
		limit = 1;
	else if (cachep->buffer_size > PAGE_SIZE)
		limit = 8;
	else if (cachep->buffer_size > 1024)
		limit = 24;
	else if (cachep->buffer_size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}
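/*
 * Worked example (illustrative): a cache with 192-byte objects on an SMP
 * machine gets limit = 120, batchcount = (120 + 1) / 2 = 60 and shared = 8,
 * i.e. the call above becomes do_tune_cpucache(cachep, 120, 60, 8).
 * With CONFIG_DEBUG_SLAB the limit is first clamped to 32, giving
 * do_tune_cpucache(cachep, 32, 16, 8).
 */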
/*
 * Drain an array if it contains any elements taking the l3 lock only if
 * necessary. Note that the l3 listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&l3->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&l3->list_lock);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_list3 *l3;
	int node = numa_node_id();
	struct delayed_work *work =
		container_of(w, struct delayed_work, work);

	if (!mutex_trylock(&cache_chain_mutex))
		/* Give up. Setup the next iteration. */
		goto out;

	list_for_each_entry(searchp, &cache_chain, next) {
		check_irq_on();

		/*
		 * We only take the l3 lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		l3 = searchp->nodelists[node];

		reap_alien(searchp, l3);

		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(l3->next_reap, jiffies))
			goto next;

		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

		drain_array(searchp, l3, l3->shared, 0, node);

		if (l3->free_touched)
			l3->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, l3, (l3->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&cache_chain_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
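/*
 * Worked example (illustrative): for a head array with limit = 120 and
 * avail = 90, a non-forced drain_array() call (as issued from cache_reap()
 * above) frees (120 + 4) / 5 = 24 objects back to the node lists and
 * shifts the remaining 66 entries down; a forced drain would free all 90.
 */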
#ifdef CONFIG_SLABINFO

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#if STATS
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&cache_chain_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&cache_chain, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &cache_chain, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_partial, list) {
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_free, list) {
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		if (l3->shared)
			shared_avail += l3->shared->avail;

		spin_unlock_irq(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
#if STATS
	{	/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
		%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}
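/*
 * Example output (illustrative, made-up numbers) of /proc/slabinfo as
 * produced by print_slabinfo_header() and s_show() above, without STATS:
 *
 *   slabinfo - version: 2.1
 *   # name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
 *   dentry             12894  13300    192   20    1 : tunables  120   60    8 : slabdata    665    665      0
 */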
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &cache_chain, next) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}
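/*
 * Illustrative usage (user space, root, CONFIG_SLABINFO): tunables are set
 * by writing "cache-name limit batchcount sharedfactor" to /proc/slabinfo,
 * which ends up in slabinfo_write() above, e.g.
 *
 *	# echo "dentry 120 60 8" > /proc/slabinfo
 *
 * The cache name and numbers are only an example; out-of-range values are
 * silently ignored (res = 0 above) and the write still succeeds.
 */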
"%s: %lu ", name, n[2*i+3]); 4443 show_symbol(m, n[2*i+2]); 4444 seq_putc(m, '\n'); 4445 } 4446 4447 return 0; 4448 } 4449 4450 const struct seq_operations slabstats_op = { 4451 .start = leaks_start, 4452 .next = s_next, 4453 .stop = s_stop, 4454 .show = leaks_show, 4455 }; 4456 #endif 4457 #endif 4458 4459 /** 4460 * ksize - get the actual amount of memory allocated for a given object 4461 * @objp: Pointer to the object 4462 * 4463 * kmalloc may internally round up allocations and return more memory 4464 * than requested. ksize() can be used to determine the actual amount of 4465 * memory allocated. The caller may use this additional memory, even though 4466 * a smaller amount of memory was initially specified with the kmalloc call. 4467 * The caller must guarantee that objp points to a valid object previously 4468 * allocated with either kmalloc() or kmem_cache_alloc(). The object 4469 * must not be freed during the duration of the call. 4470 */ 4471 size_t ksize(const void *objp) 4472 { 4473 BUG_ON(!objp); 4474 if (unlikely(objp == ZERO_SIZE_PTR)) 4475 return 0; 4476 4477 return obj_size(virt_to_cache(objp)); 4478 } 4479 EXPORT_SYMBOL(ksize); 4480