xref: /openbmc/linux/mm/slab.c (revision 4f3865fb)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed by smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
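/*
 * Illustrative sketch (not part of the original file): typical use of the
 * cache API described above, assuming a caller-defined struct foo and an
 * optional constructor foo_ctor:
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);	(object handed back still initialized)
 *	kmem_cache_destroy(foo_cache);	(caller must prevent concurrent allocs)
 */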
88 
89 #include	<linux/config.h>
90 #include	<linux/slab.h>
91 #include	<linux/mm.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/seq_file.h>
99 #include	<linux/notifier.h>
100 #include	<linux/kallsyms.h>
101 #include	<linux/cpu.h>
102 #include	<linux/sysctl.h>
103 #include	<linux/module.h>
104 #include	<linux/rcupdate.h>
105 #include	<linux/string.h>
106 #include	<linux/nodemask.h>
107 #include	<linux/mempolicy.h>
108 #include	<linux/mutex.h>
109 
110 #include	<asm/uaccess.h>
111 #include	<asm/cacheflush.h>
112 #include	<asm/tlbflush.h>
113 #include	<asm/page.h>
114 
115 /*
116  * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
117  *		  SLAB_RED_ZONE & SLAB_POISON.
118  *		  0 for faster, smaller code (especially in the critical paths).
119  *
120  * STATS	- 1 to collect stats for /proc/slabinfo.
121  *		  0 for faster, smaller code (especially in the critical paths).
122  *
123  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
124  */
125 
126 #ifdef CONFIG_DEBUG_SLAB
127 #define	DEBUG		1
128 #define	STATS		1
129 #define	FORCED_DEBUG	1
130 #else
131 #define	DEBUG		0
132 #define	STATS		0
133 #define	FORCED_DEBUG	0
134 #endif
135 
136 /* Shouldn't this be in a header file somewhere? */
137 #define	BYTES_PER_WORD		sizeof(void *)
138 
139 #ifndef cache_line_size
140 #define cache_line_size()	L1_CACHE_BYTES
141 #endif
142 
143 #ifndef ARCH_KMALLOC_MINALIGN
144 /*
145  * Enforce a minimum alignment for the kmalloc caches.
146  * Usually, the kmalloc caches are cache_line_size() aligned, except when
147  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
148  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
149  * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
150  * Note that this flag disables some debug features.
151  */
152 #define ARCH_KMALLOC_MINALIGN 0
153 #endif
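/*
 * Hypothetical example (not taken from any real arch header): an architecture
 * whose DMA engines require 16-byte aligned buffers could set, in its asm
 * headers:
 *
 *	#define ARCH_KMALLOC_MINALIGN 16
 *
 * which guarantees at least that alignment for every kmalloc cache.
 */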
154 
155 #ifndef ARCH_SLAB_MINALIGN
156 /*
157  * Enforce a minimum alignment for all caches.
158  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
159  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
160  * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
161  * some debug features.
162  */
163 #define ARCH_SLAB_MINALIGN 0
164 #endif
165 
166 #ifndef ARCH_KMALLOC_FLAGS
167 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
168 #endif
169 
170 /* Legal flag mask for kmem_cache_create(). */
171 #if DEBUG
172 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
173 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
174 			 SLAB_CACHE_DMA | \
175 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
176 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
177 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
178 #else
179 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
180 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
181 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
182 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
183 #endif
184 
185 /*
186  * kmem_bufctl_t:
187  *
188  * Bufctls are used for linking objs within a slab via
189  * linked offsets.
190  *
191  * This implementation relies on "struct page" for locating the cache &
192  * slab an object belongs to.
193  * This allows the bufctl structure to be small (one int), but limits
194  * the number of objects a slab (not a cache) can contain when off-slab
195  * bufctls are used. The limit is the size of the largest general cache
196  * that does not use off-slab slabs.
197  * For 32bit archs with 4 kB pages, this is 56.
198  * This is not serious, as it is only for large objects, when it is unwise
199  * to have too many per slab.
200  * Note: This limit can be raised by introducing a general cache whose size
201  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
202  */
203 
204 typedef unsigned int kmem_bufctl_t;
205 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
206 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
207 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
208 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
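/*
 * Illustrative sketch (added for clarity, not in the original): within a slab
 * the free objects form a singly linked list of indices kept in the
 * kmem_bufctl_t array that follows struct slab, e.g. with objects 2 and 5
 * free:
 *
 *	slabp->free = 2;
 *	slab_bufctl(slabp)[2] = 5;
 *	slab_bufctl(slabp)[5] = BUFCTL_END;
 *
 * Allocation pops slabp->free and follows the chain; freeing pushes the
 * object's index back onto the head of the chain.
 */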
209 
210 /*
211  * struct slab
212  *
213  * Manages the objs in a slab. Placed either at the beginning of mem allocated
214  * for a slab, or allocated from a general cache.
215  * Slabs are chained into three lists: fully used, partial, and fully free slabs.
216  */
217 struct slab {
218 	struct list_head list;
219 	unsigned long colouroff;
220 	void *s_mem;		/* including colour offset */
221 	unsigned int inuse;	/* num of objs active in slab */
222 	kmem_bufctl_t free;
223 	unsigned short nodeid;
224 };
225 
226 /*
227  * struct slab_rcu
228  *
229  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
230  * arrange for kmem_freepages to be called via RCU.  This is useful if
231  * we need to approach a kernel structure obliquely, from its address
232  * obtained without the usual locking.  We can lock the structure to
233  * stabilize it and check it's still at the given address, only if we
234  * can be sure that the memory has not been meanwhile reused for some
235  * other kind of object (which our subsystem's lock might corrupt).
236  *
237  * rcu_read_lock before reading the address, then rcu_read_unlock after
238  * taking the spinlock within the structure expected at that address.
239  *
240  * We assume struct slab_rcu can overlay struct slab when destroying.
241  */
242 struct slab_rcu {
243 	struct rcu_head head;
244 	struct kmem_cache *cachep;
245 	void *addr;
246 };
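/*
 * Sketch of the lookup pattern described above (hypothetical helper names,
 * for illustration only):
 *
 *	rcu_read_lock();
 *	obj = read_address_without_lock();
 *	spin_lock(&obj->lock);
 *	rcu_read_unlock();
 *	if (obj_is_still_the_one_we_wanted(obj))
 *		... use obj ...
 *	spin_unlock(&obj->lock);
 *
 * SLAB_DESTROY_BY_RCU only guarantees that the memory is not returned to the
 * page allocator before a grace period elapses; the object may have been
 * reused for another object of the same cache, so the caller must revalidate.
 */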
247 
248 /*
249  * struct array_cache
250  *
251  * Purpose:
252  * - LIFO ordering, to hand out cache-warm objects from _alloc
253  * - reduce the number of linked list operations
254  * - reduce spinlock operations
255  *
256  * The limit is stored in the per-cpu structure to reduce the data cache
257  * footprint.
258  *
259  */
260 struct array_cache {
261 	unsigned int avail;
262 	unsigned int limit;
263 	unsigned int batchcount;
264 	unsigned int touched;
265 	spinlock_t lock;
266 	void *entry[0];	/*
267 			 * Must have this definition in here for the proper
268 			 * alignment of array_cache. Also simplifies accessing
269 			 * the entries.
270 			 * [0] is for gcc 2.95. It should really be [].
271 			 */
272 };
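/*
 * Minimal sketch of the LIFO behaviour described above (for illustration,
 * not part of the original file):
 *
 *	free:	ac->entry[ac->avail++] = objp;		(push)
 *	alloc:	objp = ac->entry[--ac->avail];		(pop)
 *
 * so the most recently freed (cache-warm) object is handed out first.
 */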
273 
274 /*
275  * bootstrap: The caches do not work without cpuarrays anymore, but the
276  * cpuarrays are allocated from the generic caches...
277  */
278 #define BOOT_CPUCACHE_ENTRIES	1
279 struct arraycache_init {
280 	struct array_cache cache;
281 	void *entries[BOOT_CPUCACHE_ENTRIES];
282 };
283 
284 /*
285  * The slab lists for all objects.
286  */
287 struct kmem_list3 {
288 	struct list_head slabs_partial;	/* partial list first, better asm code */
289 	struct list_head slabs_full;
290 	struct list_head slabs_free;
291 	unsigned long free_objects;
292 	unsigned int free_limit;
293 	unsigned int colour_next;	/* Per-node cache coloring */
294 	spinlock_t list_lock;
295 	struct array_cache *shared;	/* shared per node */
296 	struct array_cache **alien;	/* on other nodes */
297 	unsigned long next_reap;	/* updated without locking */
298 	int free_touched;		/* updated without locking */
299 };
300 
301 /*
302  * Need this for bootstrapping a per node allocator.
303  */
304 #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
305 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
306 #define	CACHE_CACHE 0
307 #define	SIZE_AC 1
308 #define	SIZE_L3 (1 + MAX_NUMNODES)
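/*
 * Illustrative layout of initkmem_list3, derived from the constants above:
 *
 *	[CACHE_CACHE]		list3 for cache_cache (boot node)
 *	[SIZE_AC + node]	list3s, one per node, for the kmalloc cache
 *				sized for struct arraycache_init
 *	[SIZE_L3 + node]	list3s, one per node, for the kmalloc cache
 *				sized for struct kmem_list3
 */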
309 
310 /*
311  * This function must be completely optimized away if a constant is passed to
312  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
313  */
314 static __always_inline int index_of(const size_t size)
315 {
316 	extern void __bad_size(void);
317 
318 	if (__builtin_constant_p(size)) {
319 		int i = 0;
320 
321 #define CACHE(x) \
322 	if (size <= x) \
323 		return i; \
324 	else \
325 		i++;
326 #include "linux/kmalloc_sizes.h"
327 #undef CACHE
328 		__bad_size();
329 	} else
330 		__bad_size();
331 	return 0;
332 }
333 
334 #define INDEX_AC index_of(sizeof(struct arraycache_init))
335 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
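/*
 * For illustration (assuming a configuration whose kmalloc size ladder starts
 * 32, 64, 96, ...): index_of(24) evaluates to 0 and index_of(50) to 1, so
 * INDEX_AC/INDEX_L3 select the general caches that back struct
 * arraycache_init and struct kmem_list3 respectively.
 */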
336 
337 static void kmem_list3_init(struct kmem_list3 *parent)
338 {
339 	INIT_LIST_HEAD(&parent->slabs_full);
340 	INIT_LIST_HEAD(&parent->slabs_partial);
341 	INIT_LIST_HEAD(&parent->slabs_free);
342 	parent->shared = NULL;
343 	parent->alien = NULL;
344 	parent->colour_next = 0;
345 	spin_lock_init(&parent->list_lock);
346 	parent->free_objects = 0;
347 	parent->free_touched = 0;
348 }
349 
350 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
351 	do {								\
352 		INIT_LIST_HEAD(listp);					\
353 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
354 	} while (0)
355 
356 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
357 	do {								\
358 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
359 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
360 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
361 	} while (0)
362 
363 /*
364  * struct kmem_cache
365  *
366  * manages a cache.
367  */
368 
369 struct kmem_cache {
370 /* 1) per-cpu data, touched during every alloc/free */
371 	struct array_cache *array[NR_CPUS];
372 /* 2) Cache tunables. Protected by cache_chain_mutex */
373 	unsigned int batchcount;
374 	unsigned int limit;
375 	unsigned int shared;
376 
377 	unsigned int buffer_size;
378 /* 3) touched by every alloc & free from the backend */
379 	struct kmem_list3 *nodelists[MAX_NUMNODES];
380 
381 	unsigned int flags;		/* constant flags */
382 	unsigned int num;		/* # of objs per slab */
383 
384 /* 4) cache_grow/shrink */
385 	/* order of pgs per slab (2^n) */
386 	unsigned int gfporder;
387 
388 	/* force GFP flags, e.g. GFP_DMA */
389 	gfp_t gfpflags;
390 
391 	size_t colour;			/* cache colouring range */
392 	unsigned int colour_off;	/* colour offset */
393 	struct kmem_cache *slabp_cache;
394 	unsigned int slab_size;
395 	unsigned int dflags;		/* dynamic flags */
396 
397 	/* constructor func */
398 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
399 
400 	/* de-constructor func */
401 	void (*dtor) (void *, struct kmem_cache *, unsigned long);
402 
403 /* 5) cache creation/removal */
404 	const char *name;
405 	struct list_head next;
406 
407 /* 6) statistics */
408 #if STATS
409 	unsigned long num_active;
410 	unsigned long num_allocations;
411 	unsigned long high_mark;
412 	unsigned long grown;
413 	unsigned long reaped;
414 	unsigned long errors;
415 	unsigned long max_freeable;
416 	unsigned long node_allocs;
417 	unsigned long node_frees;
418 	unsigned long node_overflow;
419 	atomic_t allochit;
420 	atomic_t allocmiss;
421 	atomic_t freehit;
422 	atomic_t freemiss;
423 #endif
424 #if DEBUG
425 	/*
426 	 * If debugging is enabled, then the allocator can add additional
427 	 * fields and/or padding to every object. buffer_size contains the total
428 	 * object size including these internal fields, the following two
429 	 * variables contain the offset to the user object and its size.
430 	 */
431 	int obj_offset;
432 	int obj_size;
433 #endif
434 };
435 
436 #define CFLGS_OFF_SLAB		(0x80000000UL)
437 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
438 
439 #define BATCHREFILL_LIMIT	16
440 /*
441  * Optimization question: fewer reaps mean a lower probability of unnecessary
442  * cpucache drain/refill cycles.
443  *
444  * OTOH the cpuarrays can contain lots of objects,
445  * which could lock up otherwise freeable slabs.
446  */
447 #define REAPTIMEOUT_CPUC	(2*HZ)
448 #define REAPTIMEOUT_LIST3	(4*HZ)
449 
450 #if STATS
451 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
452 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
453 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
454 #define	STATS_INC_GROWN(x)	((x)->grown++)
455 #define	STATS_INC_REAPED(x)	((x)->reaped++)
456 #define	STATS_SET_HIGH(x)						\
457 	do {								\
458 		if ((x)->num_active > (x)->high_mark)			\
459 			(x)->high_mark = (x)->num_active;		\
460 	} while (0)
461 #define	STATS_INC_ERR(x)	((x)->errors++)
462 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
463 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
464 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
465 #define	STATS_SET_FREEABLE(x, i)					\
466 	do {								\
467 		if ((x)->max_freeable < i)				\
468 			(x)->max_freeable = i;				\
469 	} while (0)
470 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
471 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
472 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
473 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
474 #else
475 #define	STATS_INC_ACTIVE(x)	do { } while (0)
476 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
477 #define	STATS_INC_ALLOCED(x)	do { } while (0)
478 #define	STATS_INC_GROWN(x)	do { } while (0)
479 #define	STATS_INC_REAPED(x)	do { } while (0)
480 #define	STATS_SET_HIGH(x)	do { } while (0)
481 #define	STATS_INC_ERR(x)	do { } while (0)
482 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
483 #define	STATS_INC_NODEFREES(x)	do { } while (0)
484 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
485 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
486 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
487 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
488 #define STATS_INC_FREEHIT(x)	do { } while (0)
489 #define STATS_INC_FREEMISS(x)	do { } while (0)
490 #endif
491 
492 #if DEBUG
493 /*
494  * Magic nums for obj red zoning.
495  * Placed in the first word before and the first word after an obj.
496  */
497 #define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
498 #define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */
499 
500 /* ...and for poisoning */
501 #define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
502 #define POISON_FREE	0x6b	/* for use-after-free poisoning */
503 #define	POISON_END	0xa5	/* end-byte of poisoning */
504 
505 /*
506  * memory layout of objects:
507  * 0		: objp
508  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
509  * 		the end of an object is aligned with the end of the real
510  * 		allocation. Catches writes behind the end of the allocation.
511  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
512  * 		redzone word.
513  * cachep->obj_offset: The real object.
514  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
515  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
516  *					[BYTES_PER_WORD long]
517  */
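/*
 * Worked example of the layout above (illustrative only, ignoring any extra
 * alignment padding): a 16-byte object with SLAB_RED_ZONE | SLAB_STORE_USER
 * on a 32-bit box (BYTES_PER_WORD == 4) could end up with buffer_size == 28:
 *
 *	offset  0.. 3	redzone word 1	(obj_offset - BYTES_PER_WORD)
 *	offset  4..19	the real object	(obj_offset == 4)
 *	offset 20..23	redzone word 2	(buffer_size - 2 * BYTES_PER_WORD)
 *	offset 24..27	last caller	(buffer_size - 1 * BYTES_PER_WORD)
 */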
518 static int obj_offset(struct kmem_cache *cachep)
519 {
520 	return cachep->obj_offset;
521 }
522 
523 static int obj_size(struct kmem_cache *cachep)
524 {
525 	return cachep->obj_size;
526 }
527 
528 static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
529 {
530 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
531 	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
532 }
533 
534 static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
535 {
536 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
537 	if (cachep->flags & SLAB_STORE_USER)
538 		return (unsigned long *)(objp + cachep->buffer_size -
539 					 2 * BYTES_PER_WORD);
540 	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
541 }
542 
543 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
544 {
545 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
546 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
547 }
548 
549 #else
550 
551 #define obj_offset(x)			0
552 #define obj_size(cachep)		(cachep->buffer_size)
553 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
554 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
555 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
556 
557 #endif
558 
559 /*
560  * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
561  * order.
562  */
563 #if defined(CONFIG_LARGE_ALLOCS)
564 #define	MAX_OBJ_ORDER	13	/* up to 32Mb */
565 #define	MAX_GFP_ORDER	13	/* up to 32Mb */
566 #elif defined(CONFIG_MMU)
567 #define	MAX_OBJ_ORDER	5	/* 32 pages */
568 #define	MAX_GFP_ORDER	5	/* 32 pages */
569 #else
570 #define	MAX_OBJ_ORDER	8	/* up to 1Mb */
571 #define	MAX_GFP_ORDER	8	/* up to 1Mb */
572 #endif
573 
574 /*
575  * Do not go above this order unless 0 objects fit into the slab.
576  */
577 #define	BREAK_GFP_ORDER_HI	1
578 #define	BREAK_GFP_ORDER_LO	0
579 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
580 
581 /*
582  * Functions for storing/retrieving the cachep and/or slab from the page
583  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
584  * these are used to find the cache to which an obj belongs.
585  */
586 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
587 {
588 	page->lru.next = (struct list_head *)cache;
589 }
590 
591 static inline struct kmem_cache *page_get_cache(struct page *page)
592 {
593 	if (unlikely(PageCompound(page)))
594 		page = (struct page *)page_private(page);
595 	return (struct kmem_cache *)page->lru.next;
596 }
597 
598 static inline void page_set_slab(struct page *page, struct slab *slab)
599 {
600 	page->lru.prev = (struct list_head *)slab;
601 }
602 
603 static inline struct slab *page_get_slab(struct page *page)
604 {
605 	if (unlikely(PageCompound(page)))
606 		page = (struct page *)page_private(page);
607 	return (struct slab *)page->lru.prev;
608 }
609 
610 static inline struct kmem_cache *virt_to_cache(const void *obj)
611 {
612 	struct page *page = virt_to_page(obj);
613 	return page_get_cache(page);
614 }
615 
616 static inline struct slab *virt_to_slab(const void *obj)
617 {
618 	struct page *page = virt_to_page(obj);
619 	return page_get_slab(page);
620 }
621 
622 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
623 				 unsigned int idx)
624 {
625 	return slab->s_mem + cache->buffer_size * idx;
626 }
627 
628 static inline unsigned int obj_to_index(struct kmem_cache *cache,
629 					struct slab *slab, void *obj)
630 {
631 	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
632 }
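/*
 * index_to_obj() and obj_to_index() are inverses of each other. A quick
 * illustration (hypothetical numbers): with buffer_size == 256, object #3
 * lives at s_mem + 768, and obj_to_index() maps that address back to 3.
 */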
633 
634 /*
635  * These are the default caches for kmalloc. Custom caches can have other sizes.
636  */
637 struct cache_sizes malloc_sizes[] = {
638 #define CACHE(x) { .cs_size = (x) },
639 #include <linux/kmalloc_sizes.h>
640 	CACHE(ULONG_MAX)
641 #undef CACHE
642 };
643 EXPORT_SYMBOL(malloc_sizes);
644 
645 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
646 struct cache_names {
647 	char *name;
648 	char *name_dma;
649 };
650 
651 static struct cache_names __initdata cache_names[] = {
652 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
653 #include <linux/kmalloc_sizes.h>
654 	{NULL,}
655 #undef CACHE
656 };
657 
658 static struct arraycache_init initarray_cache __initdata =
659     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
660 static struct arraycache_init initarray_generic =
661     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
662 
663 /* internal cache of cache description objs */
664 static struct kmem_cache cache_cache = {
665 	.batchcount = 1,
666 	.limit = BOOT_CPUCACHE_ENTRIES,
667 	.shared = 1,
668 	.buffer_size = sizeof(struct kmem_cache),
669 	.name = "kmem_cache",
670 #if DEBUG
671 	.obj_size = sizeof(struct kmem_cache),
672 #endif
673 };
674 
675 /* Guard access to the cache-chain. */
676 static DEFINE_MUTEX(cache_chain_mutex);
677 static struct list_head cache_chain;
678 
679 /*
680  * vm_enough_memory() looks at this to determine how many slab-allocated pages
681  * are possibly freeable under pressure
682  *
683  * SLAB_RECLAIM_ACCOUNT turns this on per-slab
684  */
685 atomic_t slab_reclaim_pages;
686 
687 /*
688  * chicken and egg problem: delay the per-cpu array allocation
689  * until the general caches are up.
690  */
691 static enum {
692 	NONE,
693 	PARTIAL_AC,
694 	PARTIAL_L3,
695 	FULL
696 } g_cpucache_up;
697 
698 /*
699  * used by boot code to determine if it can use the slab allocator
700  */
701 int slab_is_available(void)
702 {
703 	return g_cpucache_up == FULL;
704 }
705 
706 static DEFINE_PER_CPU(struct work_struct, reap_work);
707 
708 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
709 			int node);
710 static void enable_cpucache(struct kmem_cache *cachep);
711 static void cache_reap(void *unused);
712 static int __node_shrink(struct kmem_cache *cachep, int node);
713 
714 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
715 {
716 	return cachep->array[smp_processor_id()];
717 }
718 
719 static inline struct kmem_cache *__find_general_cachep(size_t size,
720 							gfp_t gfpflags)
721 {
722 	struct cache_sizes *csizep = malloc_sizes;
723 
724 #if DEBUG
725 	/* This happens if someone tries to call
726 	 * kmem_cache_create(), or __kmalloc(), before
727 	 * the generic caches are initialized.
728 	 */
729 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
730 #endif
731 	while (size > csizep->cs_size)
732 		csizep++;
733 
734 	/*
735 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
736 	 * has cs_{dma,}cachep==NULL. Thus no special case
737 	 * for large kmalloc calls required.
738 	 */
739 	if (unlikely(gfpflags & GFP_DMA))
740 		return csizep->cs_dmacachep;
741 	return csizep->cs_cachep;
742 }
743 
744 struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
745 {
746 	return __find_general_cachep(size, gfpflags);
747 }
748 EXPORT_SYMBOL(kmem_find_general_cachep);
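/*
 * Illustration (assuming a size ladder that includes 96 and 128): a request
 * such as kmem_find_general_cachep(100, GFP_KERNEL) walks malloc_sizes[]
 * until cs_size >= 100 and returns the 128-byte cache; with GFP_DMA set it
 * would return the matching cs_dmacachep instead.
 */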
749 
750 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
751 {
752 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
753 }
754 
755 /*
756  * Calculate the number of objects and left-over bytes for a given buffer size.
757  */
758 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
759 			   size_t align, int flags, size_t *left_over,
760 			   unsigned int *num)
761 {
762 	int nr_objs;
763 	size_t mgmt_size;
764 	size_t slab_size = PAGE_SIZE << gfporder;
765 
766 	/*
767 	 * The slab management structure can be either off the slab or
768 	 * on it. For the latter case, the memory allocated for a
769 	 * slab is used for:
770 	 *
771 	 * - The struct slab
772 	 * - One kmem_bufctl_t for each object
773 	 * - Padding to respect alignment of @align
774 	 * - @buffer_size bytes for each object
775 	 *
776 	 * If the slab management structure is off the slab, then the
777 	 * alignment will already be calculated into the size. Because
778 	 * the slabs are all pages aligned, the objects will be at the
779 	 * correct alignment when allocated.
780 	 */
781 	if (flags & CFLGS_OFF_SLAB) {
782 		mgmt_size = 0;
783 		nr_objs = slab_size / buffer_size;
784 
785 		if (nr_objs > SLAB_LIMIT)
786 			nr_objs = SLAB_LIMIT;
787 	} else {
788 		/*
789 		 * Ignore padding for the initial guess. The padding
790 		 * is at most @align-1 bytes, and @buffer_size is at
791 		 * least @align. In the worst case, this result will
792 		 * be one greater than the number of objects that fit
793 		 * into the memory allocation when taking the padding
794 		 * into account.
795 		 */
796 		nr_objs = (slab_size - sizeof(struct slab)) /
797 			  (buffer_size + sizeof(kmem_bufctl_t));
798 
799 		/*
800 		 * This calculated number will be either the right
801 		 * amount, or one greater than what we want.
802 		 */
803 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
804 		       > slab_size)
805 			nr_objs--;
806 
807 		if (nr_objs > SLAB_LIMIT)
808 			nr_objs = SLAB_LIMIT;
809 
810 		mgmt_size = slab_mgmt_size(nr_objs, align);
811 	}
812 	*num = nr_objs;
813 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
814 }
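/*
 * Worked example for the on-slab case (purely illustrative numbers: 4096-byte
 * pages, buffer_size == 256, align == 32, and assuming sizeof(struct slab) ==
 * 32 and sizeof(kmem_bufctl_t) == 4 on some 32-bit configuration):
 *
 *	initial guess:	nr_objs = (4096 - 32) / (256 + 4) = 15
 *	mgmt_size:	ALIGN(32 + 15 * 4, 32) = 96
 *	check:		96 + 15 * 256 = 3936 <= 4096, so num = 15
 *	left_over:	4096 - 15 * 256 - 96 = 160
 */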
815 
816 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
817 
818 static void __slab_error(const char *function, struct kmem_cache *cachep,
819 			char *msg)
820 {
821 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
822 	       function, cachep->name, msg);
823 	dump_stack();
824 }
825 
826 #ifdef CONFIG_NUMA
827 /*
828  * Special reaping functions for NUMA systems called from cache_reap().
829  * These take care of doing round-robin flushing of alien caches (containing
830  * objects freed on a node other than the one they were allocated from) and the
831  * flushing of remote pcps by calling drain_node_pages.
832  */
833 static DEFINE_PER_CPU(unsigned long, reap_node);
834 
835 static void init_reap_node(int cpu)
836 {
837 	int node;
838 
839 	node = next_node(cpu_to_node(cpu), node_online_map);
840 	if (node == MAX_NUMNODES)
841 		node = first_node(node_online_map);
842 
843 	__get_cpu_var(reap_node) = node;
844 }
845 
846 static void next_reap_node(void)
847 {
848 	int node = __get_cpu_var(reap_node);
849 
850 	/*
851 	 * Also drain per cpu pages on remote zones
852 	 */
853 	if (node != numa_node_id())
854 		drain_node_pages(node);
855 
856 	node = next_node(node, node_online_map);
857 	if (unlikely(node >= MAX_NUMNODES))
858 		node = first_node(node_online_map);
859 	__get_cpu_var(reap_node) = node;
860 }
861 
862 #else
863 #define init_reap_node(cpu) do { } while (0)
864 #define next_reap_node(void) do { } while (0)
865 #endif
866 
867 /*
868  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
869  * via the workqueue/eventd.
870  * Add the CPU number into the expiration time to minimize the possibility of
871  * the CPUs getting into lockstep and contending for the global cache chain
872  * lock.
873  */
874 static void __devinit start_cpu_timer(int cpu)
875 {
876 	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
877 
878 	/*
879 	 * When this gets called from do_initcalls via cpucache_init(),
880 	 * init_workqueues() has already run, so keventd will be set up
881 	 * at that time.
882 	 */
883 	if (keventd_up() && reap_work->func == NULL) {
884 		init_reap_node(cpu);
885 		INIT_WORK(reap_work, cache_reap, NULL);
886 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
887 	}
888 }
889 
890 static struct array_cache *alloc_arraycache(int node, int entries,
891 					    int batchcount)
892 {
893 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
894 	struct array_cache *nc = NULL;
895 
896 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
897 	if (nc) {
898 		nc->avail = 0;
899 		nc->limit = entries;
900 		nc->batchcount = batchcount;
901 		nc->touched = 0;
902 		spin_lock_init(&nc->lock);
903 	}
904 	return nc;
905 }
906 
907 /*
908  * Transfer objects from one arraycache to another.
909  * Locking must be handled by the caller.
910  *
911  * Return the number of entries transferred.
912  */
913 static int transfer_objects(struct array_cache *to,
914 		struct array_cache *from, unsigned int max)
915 {
916 	/* Figure out how many entries to transfer */
917 	int nr = min(min(from->avail, max), to->limit - to->avail);
918 
919 	if (!nr)
920 		return 0;
921 
922 	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
923 			sizeof(void *) * nr);
924 
925 	from->avail -= nr;
926 	to->avail += nr;
927 	to->touched = 1;
928 	return nr;
929 }
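/*
 * Example of the nr computation above (illustrative numbers): if from->avail
 * is 10, max is 6 and 'to' has room for 3 more entries (limit 12, avail 9),
 * then nr = min(min(10, 6), 3) = 3 and the 3 topmost pointers of 'from' are
 * copied to the top of 'to'.
 */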
930 
931 #ifdef CONFIG_NUMA
932 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
933 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
934 
935 static struct array_cache **alloc_alien_cache(int node, int limit)
936 {
937 	struct array_cache **ac_ptr;
938 	int memsize = sizeof(void *) * MAX_NUMNODES;
939 	int i;
940 
941 	if (limit > 1)
942 		limit = 12;
943 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
944 	if (ac_ptr) {
945 		for_each_node(i) {
946 			if (i == node || !node_online(i)) {
947 				ac_ptr[i] = NULL;
948 				continue;
949 			}
950 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
951 			if (!ac_ptr[i]) {
952 				for (i--; i >= 0; i--)
953 					kfree(ac_ptr[i]);
954 				kfree(ac_ptr);
955 				return NULL;
956 			}
957 		}
958 	}
959 	return ac_ptr;
960 }
961 
962 static void free_alien_cache(struct array_cache **ac_ptr)
963 {
964 	int i;
965 
966 	if (!ac_ptr)
967 		return;
968 	for_each_node(i)
969 	    kfree(ac_ptr[i]);
970 	kfree(ac_ptr);
971 }
972 
973 static void __drain_alien_cache(struct kmem_cache *cachep,
974 				struct array_cache *ac, int node)
975 {
976 	struct kmem_list3 *rl3 = cachep->nodelists[node];
977 
978 	if (ac->avail) {
979 		spin_lock(&rl3->list_lock);
980 		/*
981 		 * Stuff objects into the remote nodes shared array first.
982 		 * That way we could avoid the overhead of putting the objects
983 		 * into the free lists and getting them back later.
984 		 */
985 		if (rl3->shared)
986 			transfer_objects(rl3->shared, ac, ac->limit);
987 
988 		free_block(cachep, ac->entry, ac->avail, node);
989 		ac->avail = 0;
990 		spin_unlock(&rl3->list_lock);
991 	}
992 }
993 
994 /*
995  * Called from cache_reap() to regularly drain alien caches round robin.
996  */
997 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
998 {
999 	int node = __get_cpu_var(reap_node);
1000 
1001 	if (l3->alien) {
1002 		struct array_cache *ac = l3->alien[node];
1003 
1004 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1005 			__drain_alien_cache(cachep, ac, node);
1006 			spin_unlock_irq(&ac->lock);
1007 		}
1008 	}
1009 }
1010 
1011 static void drain_alien_cache(struct kmem_cache *cachep,
1012 				struct array_cache **alien)
1013 {
1014 	int i = 0;
1015 	struct array_cache *ac;
1016 	unsigned long flags;
1017 
1018 	for_each_online_node(i) {
1019 		ac = alien[i];
1020 		if (ac) {
1021 			spin_lock_irqsave(&ac->lock, flags);
1022 			__drain_alien_cache(cachep, ac, i);
1023 			spin_unlock_irqrestore(&ac->lock, flags);
1024 		}
1025 	}
1026 }
1027 #else
1028 
1029 #define drain_alien_cache(cachep, alien) do { } while (0)
1030 #define reap_alien(cachep, l3) do { } while (0)
1031 
1032 static inline struct array_cache **alloc_alien_cache(int node, int limit)
1033 {
1034 	return (struct array_cache **) 0x01020304ul;
1035 }
1036 
1037 static inline void free_alien_cache(struct array_cache **ac_ptr)
1038 {
1039 }
1040 
1041 #endif
1042 
1043 static int cpuup_callback(struct notifier_block *nfb,
1044 				    unsigned long action, void *hcpu)
1045 {
1046 	long cpu = (long)hcpu;
1047 	struct kmem_cache *cachep;
1048 	struct kmem_list3 *l3 = NULL;
1049 	int node = cpu_to_node(cpu);
1050 	int memsize = sizeof(struct kmem_list3);
1051 
1052 	switch (action) {
1053 	case CPU_UP_PREPARE:
1054 		mutex_lock(&cache_chain_mutex);
1055 		/*
1056 		 * We need to do this right at the beginning since
1057 		 * the alloc_arraycache calls below are going to use this list.
1058 		 * kmalloc_node allows us to add the slab to the right
1059 		 * kmem_list3 and not this cpu's kmem_list3.
1060 		 */
1061 
1062 		list_for_each_entry(cachep, &cache_chain, next) {
1063 			/*
1064 			 * Set up the size64 kmemlist for cpu before we can
1065 			 * begin anything. Make sure some other cpu on this
1066 			 * node has not already allocated this
1067 			 */
1068 			if (!cachep->nodelists[node]) {
1069 				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1070 				if (!l3)
1071 					goto bad;
1072 				kmem_list3_init(l3);
1073 				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1074 				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1075 
1076 				/*
1077 				 * The l3s don't come and go as CPUs come and
1078 				 * go.  cache_chain_mutex is sufficient
1079 				 * protection here.
1080 				 */
1081 				cachep->nodelists[node] = l3;
1082 			}
1083 
1084 			spin_lock_irq(&cachep->nodelists[node]->list_lock);
1085 			cachep->nodelists[node]->free_limit =
1086 				(1 + nr_cpus_node(node)) *
1087 				cachep->batchcount + cachep->num;
1088 			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1089 		}
1090 
1091 		/*
1092 		 * Now we can go ahead with allocating the shared arrays and
1093 		 * array caches
1094 		 */
1095 		list_for_each_entry(cachep, &cache_chain, next) {
1096 			struct array_cache *nc;
1097 			struct array_cache *shared;
1098 			struct array_cache **alien;
1099 
1100 			nc = alloc_arraycache(node, cachep->limit,
1101 						cachep->batchcount);
1102 			if (!nc)
1103 				goto bad;
1104 			shared = alloc_arraycache(node,
1105 					cachep->shared * cachep->batchcount,
1106 					0xbaadf00d);
1107 			if (!shared)
1108 				goto bad;
1109 
1110 			alien = alloc_alien_cache(node, cachep->limit);
1111 			if (!alien)
1112 				goto bad;
1113 			cachep->array[cpu] = nc;
1114 			l3 = cachep->nodelists[node];
1115 			BUG_ON(!l3);
1116 
1117 			spin_lock_irq(&l3->list_lock);
1118 			if (!l3->shared) {
1119 				/*
1120 				 * We are serialised from CPU_DEAD or
1121 				 * CPU_UP_CANCELLED by the cpucontrol lock
1122 				 */
1123 				l3->shared = shared;
1124 				shared = NULL;
1125 			}
1126 #ifdef CONFIG_NUMA
1127 			if (!l3->alien) {
1128 				l3->alien = alien;
1129 				alien = NULL;
1130 			}
1131 #endif
1132 			spin_unlock_irq(&l3->list_lock);
1133 			kfree(shared);
1134 			free_alien_cache(alien);
1135 		}
1136 		mutex_unlock(&cache_chain_mutex);
1137 		break;
1138 	case CPU_ONLINE:
1139 		start_cpu_timer(cpu);
1140 		break;
1141 #ifdef CONFIG_HOTPLUG_CPU
1142 	case CPU_DEAD:
1143 		/*
1144 		 * Even if all the cpus of a node are down, we don't free the
1145 		 * kmem_list3 of any cache. This to avoid a race between
1146 		 * cpu_down, and a kmalloc allocation from another cpu for
1147 		 * memory from the node of the cpu going down.  The list3
1148 		 * structure is usually allocated from kmem_cache_create() and
1149 		 * gets destroyed at kmem_cache_destroy().
1150 		 */
1151 		/* fall thru */
1152 	case CPU_UP_CANCELED:
1153 		mutex_lock(&cache_chain_mutex);
1154 		list_for_each_entry(cachep, &cache_chain, next) {
1155 			struct array_cache *nc;
1156 			struct array_cache *shared;
1157 			struct array_cache **alien;
1158 			cpumask_t mask;
1159 
1160 			mask = node_to_cpumask(node);
1161 			/* cpu is dead; no one can alloc from it. */
1162 			nc = cachep->array[cpu];
1163 			cachep->array[cpu] = NULL;
1164 			l3 = cachep->nodelists[node];
1165 
1166 			if (!l3)
1167 				goto free_array_cache;
1168 
1169 			spin_lock_irq(&l3->list_lock);
1170 
1171 			/* Free limit for this kmem_list3 */
1172 			l3->free_limit -= cachep->batchcount;
1173 			if (nc)
1174 				free_block(cachep, nc->entry, nc->avail, node);
1175 
1176 			if (!cpus_empty(mask)) {
1177 				spin_unlock_irq(&l3->list_lock);
1178 				goto free_array_cache;
1179 			}
1180 
1181 			shared = l3->shared;
1182 			if (shared) {
1183 				free_block(cachep, l3->shared->entry,
1184 					   l3->shared->avail, node);
1185 				l3->shared = NULL;
1186 			}
1187 
1188 			alien = l3->alien;
1189 			l3->alien = NULL;
1190 
1191 			spin_unlock_irq(&l3->list_lock);
1192 
1193 			kfree(shared);
1194 			if (alien) {
1195 				drain_alien_cache(cachep, alien);
1196 				free_alien_cache(alien);
1197 			}
1198 free_array_cache:
1199 			kfree(nc);
1200 		}
1201 		/*
1202 		 * In the previous loop, all the objects were freed to
1203 		 * the respective cache's slabs,  now we can go ahead and
1204 		 * shrink each nodelist to its limit.
1205 		 */
1206 		list_for_each_entry(cachep, &cache_chain, next) {
1207 			l3 = cachep->nodelists[node];
1208 			if (!l3)
1209 				continue;
1210 			spin_lock_irq(&l3->list_lock);
1211 			/* free slabs belonging to this node */
1212 			__node_shrink(cachep, node);
1213 			spin_unlock_irq(&l3->list_lock);
1214 		}
1215 		mutex_unlock(&cache_chain_mutex);
1216 		break;
1217 #endif
1218 	}
1219 	return NOTIFY_OK;
1220 bad:
1221 	mutex_unlock(&cache_chain_mutex);
1222 	return NOTIFY_BAD;
1223 }
1224 
1225 static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
1226 
1227 /*
1228  * swap the static kmem_list3 with kmalloced memory
1229  */
1230 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1231 			int nodeid)
1232 {
1233 	struct kmem_list3 *ptr;
1234 
1235 	BUG_ON(cachep->nodelists[nodeid] != list);
1236 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1237 	BUG_ON(!ptr);
1238 
1239 	local_irq_disable();
1240 	memcpy(ptr, list, sizeof(struct kmem_list3));
1241 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1242 	cachep->nodelists[nodeid] = ptr;
1243 	local_irq_enable();
1244 }
1245 
1246 /*
1247  * Initialisation.  Called after the page allocator has been initialised and
1248  * before smp_init().
1249  */
1250 void __init kmem_cache_init(void)
1251 {
1252 	size_t left_over;
1253 	struct cache_sizes *sizes;
1254 	struct cache_names *names;
1255 	int i;
1256 	int order;
1257 
1258 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1259 		kmem_list3_init(&initkmem_list3[i]);
1260 		if (i < MAX_NUMNODES)
1261 			cache_cache.nodelists[i] = NULL;
1262 	}
1263 
1264 	/*
1265 	 * Fragmentation resistance on low memory - only use bigger
1266 	 * page orders on machines with more than 32MB of memory.
1267 	 */
1268 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1269 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1270 
1271 	/* Bootstrap is tricky, because several objects are allocated
1272 	 * from caches that do not exist yet:
1273 	 * 1) initialize the cache_cache cache: it contains the struct
1274 	 *    kmem_cache structures of all caches, except cache_cache itself:
1275 	 *    cache_cache is statically allocated.
1276 	 *    Initially an __init data area is used for the head array and the
1277 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1278 	 *    array at the end of the bootstrap.
1279 	 * 2) Create the first kmalloc cache.
1280 	 *    The struct kmem_cache for the new cache is allocated normally.
1281 	 *    An __init data area is used for the head array.
1282 	 * 3) Create the remaining kmalloc caches, with minimally sized
1283 	 *    head arrays.
1284 	 * 4) Replace the __init data head arrays for cache_cache and the first
1285 	 *    kmalloc cache with kmalloc allocated arrays.
1286 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1287 	 *    the other caches with kmalloc allocated memory.
1288 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1289 	 */
1290 
1291 	/* 1) create the cache_cache */
1292 	INIT_LIST_HEAD(&cache_chain);
1293 	list_add(&cache_cache.next, &cache_chain);
1294 	cache_cache.colour_off = cache_line_size();
1295 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1296 	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
1297 
1298 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1299 					cache_line_size());
1300 
1301 	for (order = 0; order < MAX_ORDER; order++) {
1302 		cache_estimate(order, cache_cache.buffer_size,
1303 			cache_line_size(), 0, &left_over, &cache_cache.num);
1304 		if (cache_cache.num)
1305 			break;
1306 	}
1307 	BUG_ON(!cache_cache.num);
1308 	cache_cache.gfporder = order;
1309 	cache_cache.colour = left_over / cache_cache.colour_off;
1310 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1311 				      sizeof(struct slab), cache_line_size());
1312 
1313 	/* 2+3) create the kmalloc caches */
1314 	sizes = malloc_sizes;
1315 	names = cache_names;
1316 
1317 	/*
1318 	 * Initialize the caches that provide memory for the array cache and the
1319 	 * kmem_list3 structures first.  Without this, further allocations will
1320 	 * bug.
1321 	 */
1322 
1323 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1324 					sizes[INDEX_AC].cs_size,
1325 					ARCH_KMALLOC_MINALIGN,
1326 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1327 					NULL, NULL);
1328 
1329 	if (INDEX_AC != INDEX_L3) {
1330 		sizes[INDEX_L3].cs_cachep =
1331 			kmem_cache_create(names[INDEX_L3].name,
1332 				sizes[INDEX_L3].cs_size,
1333 				ARCH_KMALLOC_MINALIGN,
1334 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1335 				NULL, NULL);
1336 	}
1337 
1338 	while (sizes->cs_size != ULONG_MAX) {
1339 		/*
1340 		 * For performance, all the general caches are L1 aligned.
1341 		 * This should be particularly beneficial on SMP boxes, as it
1342 		 * eliminates "false sharing".
1343 		 * Note for systems short on memory removing the alignment will
1344 		 * allow tighter packing of the smaller caches.
1345 		 */
1346 		if (!sizes->cs_cachep) {
1347 			sizes->cs_cachep = kmem_cache_create(names->name,
1348 					sizes->cs_size,
1349 					ARCH_KMALLOC_MINALIGN,
1350 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1351 					NULL, NULL);
1352 		}
1353 
1354 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
1355 					sizes->cs_size,
1356 					ARCH_KMALLOC_MINALIGN,
1357 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1358 						SLAB_PANIC,
1359 					NULL, NULL);
1360 		sizes++;
1361 		names++;
1362 	}
1363 	/* 4) Replace the bootstrap head arrays */
1364 	{
1365 		void *ptr;
1366 
1367 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1368 
1369 		local_irq_disable();
1370 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1371 		memcpy(ptr, cpu_cache_get(&cache_cache),
1372 		       sizeof(struct arraycache_init));
1373 		cache_cache.array[smp_processor_id()] = ptr;
1374 		local_irq_enable();
1375 
1376 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1377 
1378 		local_irq_disable();
1379 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1380 		       != &initarray_generic.cache);
1381 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1382 		       sizeof(struct arraycache_init));
1383 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1384 		    ptr;
1385 		local_irq_enable();
1386 	}
1387 	/* 5) Replace the bootstrap kmem_list3's */
1388 	{
1389 		int node;
1390 		/* Replace the static kmem_list3 structures for the boot cpu */
1391 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
1392 			  numa_node_id());
1393 
1394 		for_each_online_node(node) {
1395 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1396 				  &initkmem_list3[SIZE_AC + node], node);
1397 
1398 			if (INDEX_AC != INDEX_L3) {
1399 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1400 					  &initkmem_list3[SIZE_L3 + node],
1401 					  node);
1402 			}
1403 		}
1404 	}
1405 
1406 	/* 6) resize the head arrays to their final sizes */
1407 	{
1408 		struct kmem_cache *cachep;
1409 		mutex_lock(&cache_chain_mutex);
1410 		list_for_each_entry(cachep, &cache_chain, next)
1411 			enable_cpucache(cachep);
1412 		mutex_unlock(&cache_chain_mutex);
1413 	}
1414 
1415 	/* Done! */
1416 	g_cpucache_up = FULL;
1417 
1418 	/*
1419 	 * Register a cpu startup notifier callback that initializes
1420 	 * cpu_cache_get for all new cpus
1421 	 */
1422 	register_cpu_notifier(&cpucache_notifier);
1423 
1424 	/*
1425 	 * The reap timers are started later, with a module init call; that part
1426 	 * of the kernel is not yet operational.
1427 	 */
1428 }
1429 
1430 static int __init cpucache_init(void)
1431 {
1432 	int cpu;
1433 
1434 	/*
1435 	 * Register the timers that return unneeded pages to the page allocator
1436 	 */
1437 	for_each_online_cpu(cpu)
1438 		start_cpu_timer(cpu);
1439 	return 0;
1440 }
1441 __initcall(cpucache_init);
1442 
1443 /*
1444  * Interface to system's page allocator. No need to hold the cache-lock.
1445  *
1446  * If we requested dmaable memory, we will get it. Even if we
1447  * did not request dmaable memory, we might get it, but that
1448  * would be relatively rare and ignorable.
1449  */
1450 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1451 {
1452 	struct page *page;
1453 	void *addr;
1454 	int i;
1455 
1456 	flags |= cachep->gfpflags;
1457 #ifndef CONFIG_MMU
1458 	/* nommu uses slabs for process anonymous memory allocations, so
1459 	 * requires __GFP_COMP to properly refcount higher order allocations
1460 	 */
1461 	page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
1462 #else
1463 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1464 #endif
1465 	if (!page)
1466 		return NULL;
1467 	addr = page_address(page);
1468 
1469 	i = (1 << cachep->gfporder);
1470 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1471 		atomic_add(i, &slab_reclaim_pages);
1472 	add_page_state(nr_slab, i);
1473 	while (i--) {
1474 		__SetPageSlab(page);
1475 		page++;
1476 	}
1477 	return addr;
1478 }
1479 
1480 /*
1481  * Interface to system's page release.
1482  */
1483 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1484 {
1485 	unsigned long i = (1 << cachep->gfporder);
1486 	struct page *page = virt_to_page(addr);
1487 	const unsigned long nr_freed = i;
1488 
1489 	while (i--) {
1490 		BUG_ON(!PageSlab(page));
1491 		__ClearPageSlab(page);
1492 		page++;
1493 	}
1494 	sub_page_state(nr_slab, nr_freed);
1495 	if (current->reclaim_state)
1496 		current->reclaim_state->reclaimed_slab += nr_freed;
1497 	free_pages((unsigned long)addr, cachep->gfporder);
1498 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1499 		atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
1500 }
1501 
1502 static void kmem_rcu_free(struct rcu_head *head)
1503 {
1504 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1505 	struct kmem_cache *cachep = slab_rcu->cachep;
1506 
1507 	kmem_freepages(cachep, slab_rcu->addr);
1508 	if (OFF_SLAB(cachep))
1509 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1510 }
1511 
1512 #if DEBUG
1513 
1514 #ifdef CONFIG_DEBUG_PAGEALLOC
1515 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1516 			    unsigned long caller)
1517 {
1518 	int size = obj_size(cachep);
1519 
1520 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1521 
1522 	if (size < 5 * sizeof(unsigned long))
1523 		return;
1524 
1525 	*addr++ = 0x12345678;
1526 	*addr++ = caller;
1527 	*addr++ = smp_processor_id();
1528 	size -= 3 * sizeof(unsigned long);
1529 	{
1530 		unsigned long *sptr = &caller;
1531 		unsigned long svalue;
1532 
1533 		while (!kstack_end(sptr)) {
1534 			svalue = *sptr++;
1535 			if (kernel_text_address(svalue)) {
1536 				*addr++ = svalue;
1537 				size -= sizeof(unsigned long);
1538 				if (size <= sizeof(unsigned long))
1539 					break;
1540 			}
1541 		}
1542 
1543 	}
1544 	*addr++ = 0x87654321;
1545 }
1546 #endif
1547 
1548 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1549 {
1550 	int size = obj_size(cachep);
1551 	addr = &((char *)addr)[obj_offset(cachep)];
1552 
1553 	memset(addr, val, size);
1554 	*(unsigned char *)(addr + size - 1) = POISON_END;
1555 }
1556 
1557 static void dump_line(char *data, int offset, int limit)
1558 {
1559 	int i;
1560 	printk(KERN_ERR "%03x:", offset);
1561 	for (i = 0; i < limit; i++)
1562 		printk(" %02x", (unsigned char)data[offset + i]);
1563 	printk("\n");
1564 }
1565 #endif
1566 
1567 #if DEBUG
1568 
1569 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1570 {
1571 	int i, size;
1572 	char *realobj;
1573 
1574 	if (cachep->flags & SLAB_RED_ZONE) {
1575 		printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
1576 			*dbg_redzone1(cachep, objp),
1577 			*dbg_redzone2(cachep, objp));
1578 	}
1579 
1580 	if (cachep->flags & SLAB_STORE_USER) {
1581 		printk(KERN_ERR "Last user: [<%p>]",
1582 			*dbg_userword(cachep, objp));
1583 		print_symbol("(%s)",
1584 				(unsigned long)*dbg_userword(cachep, objp));
1585 		printk("\n");
1586 	}
1587 	realobj = (char *)objp + obj_offset(cachep);
1588 	size = obj_size(cachep);
1589 	for (i = 0; i < size && lines; i += 16, lines--) {
1590 		int limit;
1591 		limit = 16;
1592 		if (i + limit > size)
1593 			limit = size - i;
1594 		dump_line(realobj, i, limit);
1595 	}
1596 }
1597 
1598 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1599 {
1600 	char *realobj;
1601 	int size, i;
1602 	int lines = 0;
1603 
1604 	realobj = (char *)objp + obj_offset(cachep);
1605 	size = obj_size(cachep);
1606 
1607 	for (i = 0; i < size; i++) {
1608 		char exp = POISON_FREE;
1609 		if (i == size - 1)
1610 			exp = POISON_END;
1611 		if (realobj[i] != exp) {
1612 			int limit;
1613 			/* Mismatch ! */
1614 			/* Print header */
1615 			if (lines == 0) {
1616 				printk(KERN_ERR
1617 					"Slab corruption: start=%p, len=%d\n",
1618 					realobj, size);
1619 				print_objinfo(cachep, objp, 0);
1620 			}
1621 			/* Hexdump the affected line */
1622 			i = (i / 16) * 16;
1623 			limit = 16;
1624 			if (i + limit > size)
1625 				limit = size - i;
1626 			dump_line(realobj, i, limit);
1627 			i += 16;
1628 			lines++;
1629 			/* Limit to 5 lines */
1630 			if (lines > 5)
1631 				break;
1632 		}
1633 	}
1634 	if (lines != 0) {
1635 		/* Print some data about the neighboring objects, if they
1636 		 * exist:
1637 		 */
1638 		struct slab *slabp = virt_to_slab(objp);
1639 		unsigned int objnr;
1640 
1641 		objnr = obj_to_index(cachep, slabp, objp);
1642 		if (objnr) {
1643 			objp = index_to_obj(cachep, slabp, objnr - 1);
1644 			realobj = (char *)objp + obj_offset(cachep);
1645 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1646 			       realobj, size);
1647 			print_objinfo(cachep, objp, 2);
1648 		}
1649 		if (objnr + 1 < cachep->num) {
1650 			objp = index_to_obj(cachep, slabp, objnr + 1);
1651 			realobj = (char *)objp + obj_offset(cachep);
1652 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1653 			       realobj, size);
1654 			print_objinfo(cachep, objp, 2);
1655 		}
1656 	}
1657 }
1658 #endif
1659 
1660 #if DEBUG
1661 /**
1662  * slab_destroy_objs - destroy a slab and its objects
1663  * @cachep: cache pointer being destroyed
1664  * @slabp: slab pointer being destroyed
1665  *
1666  * Call the registered destructor for each object in a slab that is being
1667  * destroyed.
1668  */
1669 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1670 {
1671 	int i;
1672 	for (i = 0; i < cachep->num; i++) {
1673 		void *objp = index_to_obj(cachep, slabp, i);
1674 
1675 		if (cachep->flags & SLAB_POISON) {
1676 #ifdef CONFIG_DEBUG_PAGEALLOC
1677 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1678 					OFF_SLAB(cachep))
1679 				kernel_map_pages(virt_to_page(objp),
1680 					cachep->buffer_size / PAGE_SIZE, 1);
1681 			else
1682 				check_poison_obj(cachep, objp);
1683 #else
1684 			check_poison_obj(cachep, objp);
1685 #endif
1686 		}
1687 		if (cachep->flags & SLAB_RED_ZONE) {
1688 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1689 				slab_error(cachep, "start of a freed object "
1690 					   "was overwritten");
1691 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1692 				slab_error(cachep, "end of a freed object "
1693 					   "was overwritten");
1694 		}
1695 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
1696 			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1697 	}
1698 }
1699 #else
1700 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1701 {
1702 	if (cachep->dtor) {
1703 		int i;
1704 		for (i = 0; i < cachep->num; i++) {
1705 			void *objp = index_to_obj(cachep, slabp, i);
1706 			(cachep->dtor) (objp, cachep, 0);
1707 		}
1708 	}
1709 }
1710 #endif
1711 
1712 /**
1713  * slab_destroy - destroy and release all objects in a slab
1714  * @cachep: cache pointer being destroyed
1715  * @slabp: slab pointer being destroyed
1716  *
1717  * Destroy all the objs in a slab, and release the mem back to the system.
1718  * Before calling the slab must have been unlinked from the cache.  The
1719  * cache-lock is not held/needed.
1720  */
1721 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1722 {
1723 	void *addr = slabp->s_mem - slabp->colouroff;
1724 
1725 	slab_destroy_objs(cachep, slabp);
1726 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1727 		struct slab_rcu *slab_rcu;
1728 
1729 		slab_rcu = (struct slab_rcu *)slabp;
1730 		slab_rcu->cachep = cachep;
1731 		slab_rcu->addr = addr;
1732 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1733 	} else {
1734 		kmem_freepages(cachep, addr);
1735 		if (OFF_SLAB(cachep))
1736 			kmem_cache_free(cachep->slabp_cache, slabp);
1737 	}
1738 }
1739 
1740 /*
1741  * For setting up all the kmem_list3s for a cache whose buffer_size is the
1742  * same as the size of kmem_list3.
1743  */
1744 static void set_up_list3s(struct kmem_cache *cachep, int index)
1745 {
1746 	int node;
1747 
1748 	for_each_online_node(node) {
1749 		cachep->nodelists[node] = &initkmem_list3[index + node];
1750 		cachep->nodelists[node]->next_reap = jiffies +
1751 		    REAPTIMEOUT_LIST3 +
1752 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1753 	}
1754 }
1755 
1756 /**
1757  * calculate_slab_order - calculate size (page order) of slabs
1758  * @cachep: pointer to the cache that is being created
1759  * @size: size of objects to be created in this cache.
1760  * @align: required alignment for the objects.
1761  * @flags: slab allocation flags
1762  *
1763  * Also calculates the number of objects per slab.
1764  *
1765  * This could be made much more intelligent.  For now, try to avoid using
1766  * high order pages for slabs.  When the gfp() functions are more friendly
1767  * towards high-order requests, this should be changed.
1768  */
1769 static size_t calculate_slab_order(struct kmem_cache *cachep,
1770 			size_t size, size_t align, unsigned long flags)
1771 {
1772 	unsigned long offslab_limit;
1773 	size_t left_over = 0;
1774 	int gfporder;
1775 
1776 	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
1777 		unsigned int num;
1778 		size_t remainder;
1779 
1780 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
1781 		if (!num)
1782 			continue;
1783 
1784 		if (flags & CFLGS_OFF_SLAB) {
1785 			/*
1786 			 * Max number of objs-per-slab for caches which
1787 			 * use off-slab slabs. Needed to avoid a possible
1788 			 * looping condition in cache_grow().
1789 			 */
1790 			offslab_limit = size - sizeof(struct slab);
1791 			offslab_limit /= sizeof(kmem_bufctl_t);
1792 
1793 			if (num > offslab_limit)
1794 				break;
1795 		}
1796 
1797 		/* Found something acceptable - save it away */
1798 		cachep->num = num;
1799 		cachep->gfporder = gfporder;
1800 		left_over = remainder;
1801 
1802 		/*
1803 		 * A VFS-reclaimable slab tends to have most allocations
1804 		 * as GFP_NOFS and we really don't want to have to be allocating
1805 		 * higher-order pages when we are unable to shrink dcache.
1806 		 */
1807 		if (flags & SLAB_RECLAIM_ACCOUNT)
1808 			break;
1809 
1810 		/*
1811 		 * Large number of objects is good, but very large slabs are
1812 		 * currently bad for the gfp()s.
1813 		 */
1814 		if (gfporder >= slab_break_gfp_order)
1815 			break;
1816 
1817 		/*
1818 		 * Acceptable internal fragmentation?
1819 		 */
1820 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1821 			break;
1822 	}
1823 	return left_over;
1824 }
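/*
 * Editorial illustration of the loop above (not part of the original source):
 * with 4K pages and a hypothetical 1500-byte object, and ignoring the
 * slab-management overhead that cache_estimate() accounts for, order 0 fits
 * 2 objects with 1096 bytes left over; 1096 * 8 > 4096, so the loop moves on
 * to order 1, which fits 5 objects with 692 bytes left over and is accepted
 * (692 * 8 <= 8192, and order 1 also reaches slab_break_gfp_order on most
 * configurations).
 */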
1825 
1826 static void setup_cpu_cache(struct kmem_cache *cachep)
1827 {
1828 	if (g_cpucache_up == FULL) {
1829 		enable_cpucache(cachep);
1830 		return;
1831 	}
1832 	if (g_cpucache_up == NONE) {
1833 		/*
1834 		 * Note: the first kmem_cache_create must create the cache
1835 		 * that's used by kmalloc(24), otherwise the creation of
1836 		 * further caches will BUG().
1837 		 */
1838 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
1839 
1840 		/*
1841 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
1842 		 * the first cache, then we need to set up all its list3s,
1843 		 * otherwise the creation of further caches will BUG().
1844 		 */
1845 		set_up_list3s(cachep, SIZE_AC);
1846 		if (INDEX_AC == INDEX_L3)
1847 			g_cpucache_up = PARTIAL_L3;
1848 		else
1849 			g_cpucache_up = PARTIAL_AC;
1850 	} else {
1851 		cachep->array[smp_processor_id()] =
1852 			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1853 
1854 		if (g_cpucache_up == PARTIAL_AC) {
1855 			set_up_list3s(cachep, SIZE_L3);
1856 			g_cpucache_up = PARTIAL_L3;
1857 		} else {
1858 			int node;
1859 			for_each_online_node(node) {
1860 				cachep->nodelists[node] =
1861 				    kmalloc_node(sizeof(struct kmem_list3),
1862 						GFP_KERNEL, node);
1863 				BUG_ON(!cachep->nodelists[node]);
1864 				kmem_list3_init(cachep->nodelists[node]);
1865 			}
1866 		}
1867 	}
1868 	cachep->nodelists[numa_node_id()]->next_reap =
1869 			jiffies + REAPTIMEOUT_LIST3 +
1870 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1871 
1872 	cpu_cache_get(cachep)->avail = 0;
1873 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1874 	cpu_cache_get(cachep)->batchcount = 1;
1875 	cpu_cache_get(cachep)->touched = 0;
1876 	cachep->batchcount = 1;
1877 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1878 }
1879 
1880 /**
1881  * kmem_cache_create - Create a cache.
1882  * @name: A string which is used in /proc/slabinfo to identify this cache.
1883  * @size: The size of objects to be created in this cache.
1884  * @align: The required alignment for the objects.
1885  * @flags: SLAB flags
1886  * @ctor: A constructor for the objects.
1887  * @dtor: A destructor for the objects.
1888  *
1889  * Returns a ptr to the cache on success, NULL on failure.
1890  * Cannot be called within an interrupt, but can be interrupted.
1891  * The @ctor is run when new pages are allocated by the cache
1892  * and the @dtor is run before the pages are handed back.
1893  *
1894  * @name must be valid until the cache is destroyed. This implies that
1895  * the module calling this has to destroy the cache before getting unloaded.
1896  *
1897  * The flags are
1898  *
1899  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1900  * to catch references to uninitialised memory.
1901  *
1902  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1903  * for buffer overruns.
1904  *
1905  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1906  * cacheline.  This can be beneficial if you're counting cycles as closely
1907  * as davem.
1908  */
1909 struct kmem_cache *
1910 kmem_cache_create (const char *name, size_t size, size_t align,
1911 	unsigned long flags,
1912 	void (*ctor)(void*, struct kmem_cache *, unsigned long),
1913 	void (*dtor)(void*, struct kmem_cache *, unsigned long))
1914 {
1915 	size_t left_over, slab_size, ralign;
1916 	struct kmem_cache *cachep = NULL;
1917 	struct list_head *p;
1918 
1919 	/*
1920 	 * Sanity checks... these are all serious usage bugs.
1921 	 */
1922 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
1923 	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
1924 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
1925 				name);
1926 		BUG();
1927 	}
1928 
1929 	/*
1930 	 * Prevent CPUs from coming and going.
1931 	 * lock_cpu_hotplug() nests outside cache_chain_mutex
1932 	 */
1933 	lock_cpu_hotplug();
1934 
1935 	mutex_lock(&cache_chain_mutex);
1936 
1937 	list_for_each(p, &cache_chain) {
1938 		struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
1939 		mm_segment_t old_fs = get_fs();
1940 		char tmp;
1941 		int res;
1942 
1943 		/*
1944 		 * This happens when the module gets unloaded and doesn't
1945 		 * destroy its slab cache and no-one else reuses the vmalloc
1946 		 * area of the module.  Print a warning.
1947 		 */
1948 		set_fs(KERNEL_DS);
1949 		res = __get_user(tmp, pc->name);
1950 		set_fs(old_fs);
1951 		if (res) {
1952 			printk("SLAB: cache with size %d has lost its name\n",
1953 			       pc->buffer_size);
1954 			continue;
1955 		}
1956 
1957 		if (!strcmp(pc->name, name)) {
1958 			printk("kmem_cache_create: duplicate cache %s\n", name);
1959 			dump_stack();
1960 			goto oops;
1961 		}
1962 	}
1963 
1964 #if DEBUG
1965 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
1966 	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
1967 		/* No constructor, but initial state check requested */
1968 		printk(KERN_ERR "%s: No con, but init state check "
1969 		       "requested - %s\n", __FUNCTION__, name);
1970 		flags &= ~SLAB_DEBUG_INITIAL;
1971 	}
1972 #if FORCED_DEBUG
1973 	/*
1974 	 * Enable redzoning and last user accounting, except for caches with
1975 	 * large objects, if the increased size would increase the object size
1976 	 * above the next power of two: caches with object sizes just above a
1977 	 * power of two have a significant amount of internal fragmentation.
1978 	 */
1979 	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
1980 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1981 	if (!(flags & SLAB_DESTROY_BY_RCU))
1982 		flags |= SLAB_POISON;
1983 #endif
1984 	if (flags & SLAB_DESTROY_BY_RCU)
1985 		BUG_ON(flags & SLAB_POISON);
1986 #endif
1987 	if (flags & SLAB_DESTROY_BY_RCU)
1988 		BUG_ON(dtor);
1989 
1990 	/*
1991 	 * Always check flags; a caller might be expecting debug support which
1992 	 * isn't available.
1993 	 */
1994 	BUG_ON(flags & ~CREATE_MASK);
1995 
1996 	/*
1997 	 * Check that size is in terms of words.  This is needed to avoid
1998 	 * unaligned accesses for some archs when redzoning is used, and makes
1999 	 * sure any on-slab bufctl's are also correctly aligned.
2000 	 */
2001 	if (size & (BYTES_PER_WORD - 1)) {
2002 		size += (BYTES_PER_WORD - 1);
2003 		size &= ~(BYTES_PER_WORD - 1);
2004 	}
2005 
2006 	/* calculate the final buffer alignment: */
2007 
2008 	/* 1) arch recommendation: can be overridden for debug */
2009 	if (flags & SLAB_HWCACHE_ALIGN) {
2010 		/*
2011 		 * Default alignment: as specified by the arch code.  Except if
2012 		 * an object is really small, then squeeze multiple objects into
2013 		 * one cacheline.
2014 		 */
2015 		ralign = cache_line_size();
2016 		while (size <= ralign / 2)
2017 			ralign /= 2;
2018 	} else {
2019 		ralign = BYTES_PER_WORD;
2020 	}
2021 	/* 2) arch mandated alignment: disables debug if necessary */
2022 	if (ralign < ARCH_SLAB_MINALIGN) {
2023 		ralign = ARCH_SLAB_MINALIGN;
2024 		if (ralign > BYTES_PER_WORD)
2025 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2026 	}
2027 	/* 3) caller mandated alignment: disables debug if necessary */
2028 	if (ralign < align) {
2029 		ralign = align;
2030 		if (ralign > BYTES_PER_WORD)
2031 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2032 	}
2033 	/*
2034 	 * 4) Store it. Note that the debug code below can reduce
2035 	 *    the alignment to BYTES_PER_WORD.
2036 	 */
2037 	align = ralign;
2038 
2039 	/* Get cache's description obj. */
2040 	cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
2041 	if (!cachep)
2042 		goto oops;
2043 
2044 #if DEBUG
2045 	cachep->obj_size = size;
2046 
2047 	if (flags & SLAB_RED_ZONE) {
2048 		/* redzoning only works with word aligned caches */
2049 		align = BYTES_PER_WORD;
2050 
2051 		/* add space for red zone words */
2052 		cachep->obj_offset += BYTES_PER_WORD;
2053 		size += 2 * BYTES_PER_WORD;
2054 	}
2055 	if (flags & SLAB_STORE_USER) {
2056 		/* user store requires word alignment and
2057 		 * one word of storage past the end of the real
2058 		 * object.
2059 		 */
2060 		align = BYTES_PER_WORD;
2061 		size += BYTES_PER_WORD;
2062 	}
2063 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2064 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2065 	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2066 		cachep->obj_offset += PAGE_SIZE - size;
2067 		size = PAGE_SIZE;
2068 	}
2069 #endif
2070 #endif
2071 
2072 	/* Determine if the slab management is 'on' or 'off' slab. */
2073 	if (size >= (PAGE_SIZE >> 3))
2074 		/*
2075 		 * Size is large, assume best to place the slab management obj
2076 		 * off-slab (should allow better packing of objs).
2077 		 */
2078 		flags |= CFLGS_OFF_SLAB;
2079 
2080 	size = ALIGN(size, align);
2081 
2082 	left_over = calculate_slab_order(cachep, size, align, flags);
2083 
2084 	if (!cachep->num) {
2085 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
2086 		kmem_cache_free(&cache_cache, cachep);
2087 		cachep = NULL;
2088 		goto oops;
2089 	}
2090 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2091 			  + sizeof(struct slab), align);
2092 
2093 	/*
2094 	 * If the slab has been placed off-slab and we have enough space, then
2095 	 * move it on-slab. This is at the expense of any extra colouring.
2096 	 */
2097 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2098 		flags &= ~CFLGS_OFF_SLAB;
2099 		left_over -= slab_size;
2100 	}
2101 
2102 	if (flags & CFLGS_OFF_SLAB) {
2103 		/* really off slab. No need for manual alignment */
2104 		slab_size =
2105 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2106 	}
2107 
2108 	cachep->colour_off = cache_line_size();
2109 	/* Offset must be a multiple of the alignment. */
2110 	if (cachep->colour_off < align)
2111 		cachep->colour_off = align;
2112 	cachep->colour = left_over / cachep->colour_off;
2113 	cachep->slab_size = slab_size;
2114 	cachep->flags = flags;
2115 	cachep->gfpflags = 0;
2116 	if (flags & SLAB_CACHE_DMA)
2117 		cachep->gfpflags |= GFP_DMA;
2118 	cachep->buffer_size = size;
2119 
2120 	if (flags & CFLGS_OFF_SLAB)
2121 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2122 	cachep->ctor = ctor;
2123 	cachep->dtor = dtor;
2124 	cachep->name = name;
2125 
2126 
2127 	setup_cpu_cache(cachep);
2128 
2129 	/* cache setup completed, link it into the list */
2130 	list_add(&cachep->next, &cache_chain);
2131 oops:
2132 	if (!cachep && (flags & SLAB_PANIC))
2133 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2134 		      name);
2135 	mutex_unlock(&cache_chain_mutex);
2136 	unlock_cpu_hotplug();
2137 	return cachep;
2138 }
2139 EXPORT_SYMBOL(kmem_cache_create);
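/*
 * Editorial sketch of typical usage (not part of the original source; the
 * struct foo, foo_ctor and foo_cache names below are hypothetical).  A module
 * creates its cache once, usually from its init path, and keeps the returned
 * pointer for later kmem_cache_alloc()/kmem_cache_free() calls.
 */
#if 0
static struct kmem_cache *foo_cache;

static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	/* called only when a freshly allocated slab page is initialized */
	memset(obj, 0, sizeof(struct foo));
}

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
	return foo_cache ? 0 : -ENOMEM;
}
#endif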
2140 
2141 #if DEBUG
2142 static void check_irq_off(void)
2143 {
2144 	BUG_ON(!irqs_disabled());
2145 }
2146 
2147 static void check_irq_on(void)
2148 {
2149 	BUG_ON(irqs_disabled());
2150 }
2151 
2152 static void check_spinlock_acquired(struct kmem_cache *cachep)
2153 {
2154 #ifdef CONFIG_SMP
2155 	check_irq_off();
2156 	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2157 #endif
2158 }
2159 
2160 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2161 {
2162 #ifdef CONFIG_SMP
2163 	check_irq_off();
2164 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2165 #endif
2166 }
2167 
2168 #else
2169 #define check_irq_off()	do { } while(0)
2170 #define check_irq_on()	do { } while(0)
2171 #define check_spinlock_acquired(x) do { } while(0)
2172 #define check_spinlock_acquired_node(x, y) do { } while(0)
2173 #endif
2174 
2175 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2176 			struct array_cache *ac,
2177 			int force, int node);
2178 
2179 static void do_drain(void *arg)
2180 {
2181 	struct kmem_cache *cachep = arg;
2182 	struct array_cache *ac;
2183 	int node = numa_node_id();
2184 
2185 	check_irq_off();
2186 	ac = cpu_cache_get(cachep);
2187 	spin_lock(&cachep->nodelists[node]->list_lock);
2188 	free_block(cachep, ac->entry, ac->avail, node);
2189 	spin_unlock(&cachep->nodelists[node]->list_lock);
2190 	ac->avail = 0;
2191 }
2192 
2193 static void drain_cpu_caches(struct kmem_cache *cachep)
2194 {
2195 	struct kmem_list3 *l3;
2196 	int node;
2197 
2198 	on_each_cpu(do_drain, cachep, 1, 1);
2199 	check_irq_on();
2200 	for_each_online_node(node) {
2201 		l3 = cachep->nodelists[node];
2202 		if (l3 && l3->alien)
2203 			drain_alien_cache(cachep, l3->alien);
2204 	}
2205 
2206 	for_each_online_node(node) {
2207 		l3 = cachep->nodelists[node];
2208 		if (l3)
2209 			drain_array(cachep, l3, l3->shared, 1, node);
2210 	}
2211 }
2212 
2213 static int __node_shrink(struct kmem_cache *cachep, int node)
2214 {
2215 	struct slab *slabp;
2216 	struct kmem_list3 *l3 = cachep->nodelists[node];
2217 	int ret;
2218 
2219 	for (;;) {
2220 		struct list_head *p;
2221 
2222 		p = l3->slabs_free.prev;
2223 		if (p == &l3->slabs_free)
2224 			break;
2225 
2226 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
2227 #if DEBUG
2228 		BUG_ON(slabp->inuse);
2229 #endif
2230 		list_del(&slabp->list);
2231 
2232 		l3->free_objects -= cachep->num;
2233 		spin_unlock_irq(&l3->list_lock);
2234 		slab_destroy(cachep, slabp);
2235 		spin_lock_irq(&l3->list_lock);
2236 	}
2237 	ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
2238 	return ret;
2239 }
2240 
2241 static int __cache_shrink(struct kmem_cache *cachep)
2242 {
2243 	int ret = 0, i = 0;
2244 	struct kmem_list3 *l3;
2245 
2246 	drain_cpu_caches(cachep);
2247 
2248 	check_irq_on();
2249 	for_each_online_node(i) {
2250 		l3 = cachep->nodelists[i];
2251 		if (l3) {
2252 			spin_lock_irq(&l3->list_lock);
2253 			ret += __node_shrink(cachep, i);
2254 			spin_unlock_irq(&l3->list_lock);
2255 		}
2256 	}
2257 	return (ret ? 1 : 0);
2258 }
2259 
2260 /**
2261  * kmem_cache_shrink - Shrink a cache.
2262  * @cachep: The cache to shrink.
2263  *
2264  * Releases as many slabs as possible for a cache.
2265  * To help debugging, a zero exit status indicates all slabs were released.
2266  */
2267 int kmem_cache_shrink(struct kmem_cache *cachep)
2268 {
2269 	BUG_ON(!cachep || in_interrupt());
2270 
2271 	return __cache_shrink(cachep);
2272 }
2273 EXPORT_SYMBOL(kmem_cache_shrink);
2274 
2275 /**
2276  * kmem_cache_destroy - delete a cache
2277  * @cachep: the cache to destroy
2278  *
2279  * Remove a struct kmem_cache object from the slab cache.
2280  * Returns 0 on success.
2281  *
2282  * It is expected this function will be called by a module when it is
2283  * unloaded.  This will remove the cache completely, and avoid a duplicate
2284  * cache being allocated each time a module is loaded and unloaded, if the
2285  * module doesn't have persistent in-kernel storage across loads and unloads.
2286  *
2287  * The cache must be empty before calling this function.
2288  *
2289  * The caller must guarantee that no one will allocate memory from the cache
2290  * during the kmem_cache_destroy().
2291  */
2292 int kmem_cache_destroy(struct kmem_cache *cachep)
2293 {
2294 	int i;
2295 	struct kmem_list3 *l3;
2296 
2297 	BUG_ON(!cachep || in_interrupt());
2298 
2299 	/* Don't let CPUs come and go */
2300 	lock_cpu_hotplug();
2301 
2302 	/* Find the cache in the chain of caches. */
2303 	mutex_lock(&cache_chain_mutex);
2304 	/*
2305 	 * the chain is never empty, cache_cache is never destroyed
2306 	 */
2307 	list_del(&cachep->next);
2308 	mutex_unlock(&cache_chain_mutex);
2309 
2310 	if (__cache_shrink(cachep)) {
2311 		slab_error(cachep, "Can't free all objects");
2312 		mutex_lock(&cache_chain_mutex);
2313 		list_add(&cachep->next, &cache_chain);
2314 		mutex_unlock(&cache_chain_mutex);
2315 		unlock_cpu_hotplug();
2316 		return 1;
2317 	}
2318 
2319 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2320 		synchronize_rcu();
2321 
2322 	for_each_online_cpu(i)
2323 	    kfree(cachep->array[i]);
2324 
2325 	/* NUMA: free the list3 structures */
2326 	for_each_online_node(i) {
2327 		l3 = cachep->nodelists[i];
2328 		if (l3) {
2329 			kfree(l3->shared);
2330 			free_alien_cache(l3->alien);
2331 			kfree(l3);
2332 		}
2333 	}
2334 	kmem_cache_free(&cache_cache, cachep);
2335 	unlock_cpu_hotplug();
2336 	return 0;
2337 }
2338 EXPORT_SYMBOL(kmem_cache_destroy);
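/*
 * Editorial sketch (not part of the original source): the matching teardown
 * for the hypothetical foo_cache from the kmem_cache_create() sketch above.
 * All objects must already have been freed, per the comment above.
 */
#if 0
static void __exit foo_exit(void)
{
	if (kmem_cache_destroy(foo_cache))
		printk(KERN_ERR "foo: cache still has objects, not destroyed\n");
}
#endif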
2339 
2340 /* Get the memory for a slab management obj. */
2341 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2342 				   int colour_off, gfp_t local_flags,
2343 				   int nodeid)
2344 {
2345 	struct slab *slabp;
2346 
2347 	if (OFF_SLAB(cachep)) {
2348 		/* Slab management obj is off-slab. */
2349 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2350 					      local_flags, nodeid);
2351 		if (!slabp)
2352 			return NULL;
2353 	} else {
2354 		slabp = objp + colour_off;
2355 		colour_off += cachep->slab_size;
2356 	}
2357 	slabp->inuse = 0;
2358 	slabp->colouroff = colour_off;
2359 	slabp->s_mem = objp + colour_off;
2360 	slabp->nodeid = nodeid;
2361 	return slabp;
2362 }
2363 
2364 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2365 {
2366 	return (kmem_bufctl_t *) (slabp + 1);
2367 }
2368 
2369 static void cache_init_objs(struct kmem_cache *cachep,
2370 			    struct slab *slabp, unsigned long ctor_flags)
2371 {
2372 	int i;
2373 
2374 	for (i = 0; i < cachep->num; i++) {
2375 		void *objp = index_to_obj(cachep, slabp, i);
2376 #if DEBUG
2377 		/* need to poison the objs? */
2378 		if (cachep->flags & SLAB_POISON)
2379 			poison_obj(cachep, objp, POISON_FREE);
2380 		if (cachep->flags & SLAB_STORE_USER)
2381 			*dbg_userword(cachep, objp) = NULL;
2382 
2383 		if (cachep->flags & SLAB_RED_ZONE) {
2384 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2385 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2386 		}
2387 		/*
2388 		 * Constructors are not allowed to allocate memory from the same
2389 		 * cache which they are a constructor for.  Otherwise, deadlock.
2390 		 * They must also be threaded.
2391 		 */
2392 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2393 			cachep->ctor(objp + obj_offset(cachep), cachep,
2394 				     ctor_flags);
2395 
2396 		if (cachep->flags & SLAB_RED_ZONE) {
2397 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2398 				slab_error(cachep, "constructor overwrote the"
2399 					   " end of an object");
2400 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2401 				slab_error(cachep, "constructor overwrote the"
2402 					   " start of an object");
2403 		}
2404 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2405 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2406 			kernel_map_pages(virt_to_page(objp),
2407 					 cachep->buffer_size / PAGE_SIZE, 0);
2408 #else
2409 		if (cachep->ctor)
2410 			cachep->ctor(objp, cachep, ctor_flags);
2411 #endif
2412 		slab_bufctl(slabp)[i] = i + 1;
2413 	}
2414 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2415 	slabp->free = 0;
2416 }
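/*
 * Editorial note (not part of the original source): for a slab holding four
 * objects the loop above leaves the bufctl array as { 1, 2, 3, BUFCTL_END }
 * with slabp->free == 0, i.e. a free list threaded through object indices
 * that slab_get_obj() below pops from and slab_put_obj() pushes back onto.
 */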
2417 
2418 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2419 {
2420 	if (flags & SLAB_DMA)
2421 		BUG_ON(!(cachep->gfpflags & GFP_DMA));
2422 	else
2423 		BUG_ON(cachep->gfpflags & GFP_DMA);
2424 }
2425 
2426 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2427 				int nodeid)
2428 {
2429 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2430 	kmem_bufctl_t next;
2431 
2432 	slabp->inuse++;
2433 	next = slab_bufctl(slabp)[slabp->free];
2434 #if DEBUG
2435 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2436 	WARN_ON(slabp->nodeid != nodeid);
2437 #endif
2438 	slabp->free = next;
2439 
2440 	return objp;
2441 }
2442 
2443 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2444 				void *objp, int nodeid)
2445 {
2446 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2447 
2448 #if DEBUG
2449 	/* Verify that the slab belongs to the intended node */
2450 	WARN_ON(slabp->nodeid != nodeid);
2451 
2452 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2453 		printk(KERN_ERR "slab: double free detected in cache "
2454 				"'%s', objp %p\n", cachep->name, objp);
2455 		BUG();
2456 	}
2457 #endif
2458 	slab_bufctl(slabp)[objnr] = slabp->free;
2459 	slabp->free = objnr;
2460 	slabp->inuse--;
2461 }
2462 
2463 static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
2464 			void *objp)
2465 {
2466 	int i;
2467 	struct page *page;
2468 
2469 	/* Nasty!!!!!! I hope this is OK. */
2470 	page = virt_to_page(objp);
2471 
2472 	i = 1;
2473 	if (likely(!PageCompound(page)))
2474 		i <<= cachep->gfporder;
2475 	do {
2476 		page_set_cache(page, cachep);
2477 		page_set_slab(page, slabp);
2478 		page++;
2479 	} while (--i);
2480 }
2481 
2482 /*
2483  * Grow (by 1) the number of slabs within a cache.  This is called by
2484  * kmem_cache_alloc() when there are no active objs left in a cache.
2485  */
2486 static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2487 {
2488 	struct slab *slabp;
2489 	void *objp;
2490 	size_t offset;
2491 	gfp_t local_flags;
2492 	unsigned long ctor_flags;
2493 	struct kmem_list3 *l3;
2494 
2495 	/*
2496 	 * Be lazy and only check for valid flags here,  keeping it out of the
2497 	 * critical path in kmem_cache_alloc().
2498 	 */
2499 	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
2500 	if (flags & SLAB_NO_GROW)
2501 		return 0;
2502 
2503 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2504 	local_flags = (flags & SLAB_LEVEL_MASK);
2505 	if (!(local_flags & __GFP_WAIT))
2506 		/*
2507 		 * Not allowed to sleep.  Need to tell a constructor about
2508 		 * this - it might need to know...
2509 		 */
2510 		ctor_flags |= SLAB_CTOR_ATOMIC;
2511 
2512 	/* Take the l3 list lock to change the colour_next on this node */
2513 	check_irq_off();
2514 	l3 = cachep->nodelists[nodeid];
2515 	spin_lock(&l3->list_lock);
2516 
2517 	/* Get colour for the slab, and calculate the next value. */
2518 	offset = l3->colour_next;
2519 	l3->colour_next++;
2520 	if (l3->colour_next >= cachep->colour)
2521 		l3->colour_next = 0;
2522 	spin_unlock(&l3->list_lock);
2523 
2524 	offset *= cachep->colour_off;
2525 
2526 	if (local_flags & __GFP_WAIT)
2527 		local_irq_enable();
2528 
2529 	/*
2530 	 * The test for missing atomic flag is performed here, rather than
2531 	 * the more obvious place, simply to reduce the critical path length
2532 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2533 	 * will eventually be caught here (where it matters).
2534 	 */
2535 	kmem_flagcheck(cachep, flags);
2536 
2537 	/*
2538 	 * Get mem for the objs.  Attempt to allocate a physical page from
2539 	 * 'nodeid'.
2540 	 */
2541 	objp = kmem_getpages(cachep, flags, nodeid);
2542 	if (!objp)
2543 		goto failed;
2544 
2545 	/* Get slab management. */
2546 	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
2547 	if (!slabp)
2548 		goto opps1;
2549 
2550 	slabp->nodeid = nodeid;
2551 	set_slab_attr(cachep, slabp, objp);
2552 
2553 	cache_init_objs(cachep, slabp, ctor_flags);
2554 
2555 	if (local_flags & __GFP_WAIT)
2556 		local_irq_disable();
2557 	check_irq_off();
2558 	spin_lock(&l3->list_lock);
2559 
2560 	/* Make slab active. */
2561 	list_add_tail(&slabp->list, &(l3->slabs_free));
2562 	STATS_INC_GROWN(cachep);
2563 	l3->free_objects += cachep->num;
2564 	spin_unlock(&l3->list_lock);
2565 	return 1;
2566 opps1:
2567 	kmem_freepages(cachep, objp);
2568 failed:
2569 	if (local_flags & __GFP_WAIT)
2570 		local_irq_disable();
2571 	return 0;
2572 }
2573 
2574 #if DEBUG
2575 
2576 /*
2577  * Perform extra freeing checks:
2578  * - detect bad pointers.
2579  * - POISON/RED_ZONE checking
2580  * - destructor calls, for caches with POISON+dtor
2581  */
2582 static void kfree_debugcheck(const void *objp)
2583 {
2584 	struct page *page;
2585 
2586 	if (!virt_addr_valid(objp)) {
2587 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2588 		       (unsigned long)objp);
2589 		BUG();
2590 	}
2591 	page = virt_to_page(objp);
2592 	if (!PageSlab(page)) {
2593 		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
2594 		       (unsigned long)objp);
2595 		BUG();
2596 	}
2597 }
2598 
2599 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2600 				   void *caller)
2601 {
2602 	struct page *page;
2603 	unsigned int objnr;
2604 	struct slab *slabp;
2605 
2606 	objp -= obj_offset(cachep);
2607 	kfree_debugcheck(objp);
2608 	page = virt_to_page(objp);
2609 
2610 	if (page_get_cache(page) != cachep) {
2611 		printk(KERN_ERR "mismatch in kmem_cache_free: expected "
2612 				"cache %p, got %p\n",
2613 		       page_get_cache(page), cachep);
2614 		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
2615 		printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
2616 		       page_get_cache(page)->name);
2617 		WARN_ON(1);
2618 	}
2619 	slabp = page_get_slab(page);
2620 
2621 	if (cachep->flags & SLAB_RED_ZONE) {
2622 		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
2623 				*dbg_redzone2(cachep, objp) != RED_ACTIVE) {
2624 			slab_error(cachep, "double free, or memory outside"
2625 						" object was overwritten");
2626 			printk(KERN_ERR "%p: redzone 1:0x%lx, "
2627 					"redzone 2:0x%lx.\n",
2628 			       objp, *dbg_redzone1(cachep, objp),
2629 			       *dbg_redzone2(cachep, objp));
2630 		}
2631 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2632 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2633 	}
2634 	if (cachep->flags & SLAB_STORE_USER)
2635 		*dbg_userword(cachep, objp) = caller;
2636 
2637 	objnr = obj_to_index(cachep, slabp, objp);
2638 
2639 	BUG_ON(objnr >= cachep->num);
2640 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2641 
2642 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
2643 		/*
2644 		 * Need to call the slab's constructor so the caller can
2645 		 * perform a verify of its state (debugging).  Called without
2646 		 * the cache-lock held.
2647 		 */
2648 		cachep->ctor(objp + obj_offset(cachep),
2649 			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
2650 	}
2651 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
2652 		/* we want to cache poison the object,
2653 		 * call the destruction callback
2654 		 */
2655 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
2656 	}
2657 #ifdef CONFIG_DEBUG_SLAB_LEAK
2658 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2659 #endif
2660 	if (cachep->flags & SLAB_POISON) {
2661 #ifdef CONFIG_DEBUG_PAGEALLOC
2662 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2663 			store_stackinfo(cachep, objp, (unsigned long)caller);
2664 			kernel_map_pages(virt_to_page(objp),
2665 					 cachep->buffer_size / PAGE_SIZE, 0);
2666 		} else {
2667 			poison_obj(cachep, objp, POISON_FREE);
2668 		}
2669 #else
2670 		poison_obj(cachep, objp, POISON_FREE);
2671 #endif
2672 	}
2673 	return objp;
2674 }
2675 
2676 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2677 {
2678 	kmem_bufctl_t i;
2679 	int entries = 0;
2680 
2681 	/* Check slab's freelist to see if this obj is there. */
2682 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2683 		entries++;
2684 		if (entries > cachep->num || i >= cachep->num)
2685 			goto bad;
2686 	}
2687 	if (entries != cachep->num - slabp->inuse) {
2688 bad:
2689 		printk(KERN_ERR "slab: Internal list corruption detected in "
2690 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2691 			cachep->name, cachep->num, slabp, slabp->inuse);
2692 		for (i = 0;
2693 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2694 		     i++) {
2695 			if (i % 16 == 0)
2696 				printk("\n%03x:", i);
2697 			printk(" %02x", ((unsigned char *)slabp)[i]);
2698 		}
2699 		printk("\n");
2700 		BUG();
2701 	}
2702 }
2703 #else
2704 #define kfree_debugcheck(x) do { } while(0)
2705 #define cache_free_debugcheck(x,objp,z) (objp)
2706 #define check_slabp(x,y) do { } while(0)
2707 #endif
2708 
2709 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2710 {
2711 	int batchcount;
2712 	struct kmem_list3 *l3;
2713 	struct array_cache *ac;
2714 
2715 	check_irq_off();
2716 	ac = cpu_cache_get(cachep);
2717 retry:
2718 	batchcount = ac->batchcount;
2719 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2720 		/*
2721 		 * If there was little recent activity on this cache, then
2722 		 * perform only a partial refill.  Otherwise we could generate
2723 		 * refill bouncing.
2724 		 */
2725 		batchcount = BATCHREFILL_LIMIT;
2726 	}
2727 	l3 = cachep->nodelists[numa_node_id()];
2728 
2729 	BUG_ON(ac->avail > 0 || !l3);
2730 	spin_lock(&l3->list_lock);
2731 
2732 	/* See if we can refill from the shared array */
2733 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2734 		goto alloc_done;
2735 
2736 	while (batchcount > 0) {
2737 		struct list_head *entry;
2738 		struct slab *slabp;
2739 		/* Get the slab that the allocation is to come from. */
2740 		entry = l3->slabs_partial.next;
2741 		if (entry == &l3->slabs_partial) {
2742 			l3->free_touched = 1;
2743 			entry = l3->slabs_free.next;
2744 			if (entry == &l3->slabs_free)
2745 				goto must_grow;
2746 		}
2747 
2748 		slabp = list_entry(entry, struct slab, list);
2749 		check_slabp(cachep, slabp);
2750 		check_spinlock_acquired(cachep);
2751 		while (slabp->inuse < cachep->num && batchcount--) {
2752 			STATS_INC_ALLOCED(cachep);
2753 			STATS_INC_ACTIVE(cachep);
2754 			STATS_SET_HIGH(cachep);
2755 
2756 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2757 							    numa_node_id());
2758 		}
2759 		check_slabp(cachep, slabp);
2760 
2761 		/* move slabp to correct slabp list: */
2762 		list_del(&slabp->list);
2763 		if (slabp->free == BUFCTL_END)
2764 			list_add(&slabp->list, &l3->slabs_full);
2765 		else
2766 			list_add(&slabp->list, &l3->slabs_partial);
2767 	}
2768 
2769 must_grow:
2770 	l3->free_objects -= ac->avail;
2771 alloc_done:
2772 	spin_unlock(&l3->list_lock);
2773 
2774 	if (unlikely(!ac->avail)) {
2775 		int x;
2776 		x = cache_grow(cachep, flags, numa_node_id());
2777 
2778 		/* cache_grow can reenable interrupts, then ac could change. */
2779 		ac = cpu_cache_get(cachep);
2780 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
2781 			return NULL;
2782 
2783 		if (!ac->avail)		/* objects refilled by interrupt? */
2784 			goto retry;
2785 	}
2786 	ac->touched = 1;
2787 	return ac->entry[--ac->avail];
2788 }
2789 
2790 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2791 						gfp_t flags)
2792 {
2793 	might_sleep_if(flags & __GFP_WAIT);
2794 #if DEBUG
2795 	kmem_flagcheck(cachep, flags);
2796 #endif
2797 }
2798 
2799 #if DEBUG
2800 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2801 				gfp_t flags, void *objp, void *caller)
2802 {
2803 	if (!objp)
2804 		return objp;
2805 	if (cachep->flags & SLAB_POISON) {
2806 #ifdef CONFIG_DEBUG_PAGEALLOC
2807 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2808 			kernel_map_pages(virt_to_page(objp),
2809 					 cachep->buffer_size / PAGE_SIZE, 1);
2810 		else
2811 			check_poison_obj(cachep, objp);
2812 #else
2813 		check_poison_obj(cachep, objp);
2814 #endif
2815 		poison_obj(cachep, objp, POISON_INUSE);
2816 	}
2817 	if (cachep->flags & SLAB_STORE_USER)
2818 		*dbg_userword(cachep, objp) = caller;
2819 
2820 	if (cachep->flags & SLAB_RED_ZONE) {
2821 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2822 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2823 			slab_error(cachep, "double free, or memory outside"
2824 						" object was overwritten");
2825 			printk(KERN_ERR
2826 				"%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
2827 				objp, *dbg_redzone1(cachep, objp),
2828 				*dbg_redzone2(cachep, objp));
2829 		}
2830 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
2831 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
2832 	}
2833 #ifdef CONFIG_DEBUG_SLAB_LEAK
2834 	{
2835 		struct slab *slabp;
2836 		unsigned objnr;
2837 
2838 		slabp = page_get_slab(virt_to_page(objp));
2839 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
2840 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
2841 	}
2842 #endif
2843 	objp += obj_offset(cachep);
2844 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
2845 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2846 
2847 		if (!(flags & __GFP_WAIT))
2848 			ctor_flags |= SLAB_CTOR_ATOMIC;
2849 
2850 		cachep->ctor(objp, cachep, ctor_flags);
2851 	}
2852 	return objp;
2853 }
2854 #else
2855 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2856 #endif
2857 
2858 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
2859 {
2860 	void *objp;
2861 	struct array_cache *ac;
2862 
2863 #ifdef CONFIG_NUMA
2864 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
2865 		objp = alternate_node_alloc(cachep, flags);
2866 		if (objp != NULL)
2867 			return objp;
2868 	}
2869 #endif
2870 
2871 	check_irq_off();
2872 	ac = cpu_cache_get(cachep);
2873 	if (likely(ac->avail)) {
2874 		STATS_INC_ALLOCHIT(cachep);
2875 		ac->touched = 1;
2876 		objp = ac->entry[--ac->avail];
2877 	} else {
2878 		STATS_INC_ALLOCMISS(cachep);
2879 		objp = cache_alloc_refill(cachep, flags);
2880 	}
2881 	return objp;
2882 }
2883 
2884 static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
2885 						gfp_t flags, void *caller)
2886 {
2887 	unsigned long save_flags;
2888 	void *objp;
2889 
2890 	cache_alloc_debugcheck_before(cachep, flags);
2891 
2892 	local_irq_save(save_flags);
2893 	objp = ____cache_alloc(cachep, flags);
2894 	local_irq_restore(save_flags);
2895 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
2896 					    caller);
2897 	prefetchw(objp);
2898 	return objp;
2899 }
2900 
2901 #ifdef CONFIG_NUMA
2902 /*
2903  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
2904  *
2905  * If we are in_interrupt, then process context, including cpusets and
2906  * mempolicy, may not apply and should not be used for allocation policy.
2907  */
2908 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2909 {
2910 	int nid_alloc, nid_here;
2911 
2912 	if (in_interrupt())
2913 		return NULL;
2914 	nid_alloc = nid_here = numa_node_id();
2915 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2916 		nid_alloc = cpuset_mem_spread_node();
2917 	else if (current->mempolicy)
2918 		nid_alloc = slab_node(current->mempolicy);
2919 	if (nid_alloc != nid_here)
2920 		return __cache_alloc_node(cachep, flags, nid_alloc);
2921 	return NULL;
2922 }
2923 
2924 /*
2925  * An interface to enable slab creation on nodeid
2926  */
2927 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
2928 				int nodeid)
2929 {
2930 	struct list_head *entry;
2931 	struct slab *slabp;
2932 	struct kmem_list3 *l3;
2933 	void *obj;
2934 	int x;
2935 
2936 	l3 = cachep->nodelists[nodeid];
2937 	BUG_ON(!l3);
2938 
2939 retry:
2940 	check_irq_off();
2941 	spin_lock(&l3->list_lock);
2942 	entry = l3->slabs_partial.next;
2943 	if (entry == &l3->slabs_partial) {
2944 		l3->free_touched = 1;
2945 		entry = l3->slabs_free.next;
2946 		if (entry == &l3->slabs_free)
2947 			goto must_grow;
2948 	}
2949 
2950 	slabp = list_entry(entry, struct slab, list);
2951 	check_spinlock_acquired_node(cachep, nodeid);
2952 	check_slabp(cachep, slabp);
2953 
2954 	STATS_INC_NODEALLOCS(cachep);
2955 	STATS_INC_ACTIVE(cachep);
2956 	STATS_SET_HIGH(cachep);
2957 
2958 	BUG_ON(slabp->inuse == cachep->num);
2959 
2960 	obj = slab_get_obj(cachep, slabp, nodeid);
2961 	check_slabp(cachep, slabp);
2962 	l3->free_objects--;
2963 	/* move slabp to correct slabp list: */
2964 	list_del(&slabp->list);
2965 
2966 	if (slabp->free == BUFCTL_END)
2967 		list_add(&slabp->list, &l3->slabs_full);
2968 	else
2969 		list_add(&slabp->list, &l3->slabs_partial);
2970 
2971 	spin_unlock(&l3->list_lock);
2972 	goto done;
2973 
2974 must_grow:
2975 	spin_unlock(&l3->list_lock);
2976 	x = cache_grow(cachep, flags, nodeid);
2977 
2978 	if (!x)
2979 		return NULL;
2980 
2981 	goto retry;
2982 done:
2983 	return obj;
2984 }
2985 #endif
2986 
2987 /*
2988  * Caller needs to acquire the correct kmem_list3's list_lock
2989  */
2990 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
2991 		       int node)
2992 {
2993 	int i;
2994 	struct kmem_list3 *l3;
2995 
2996 	for (i = 0; i < nr_objects; i++) {
2997 		void *objp = objpp[i];
2998 		struct slab *slabp;
2999 
3000 		slabp = virt_to_slab(objp);
3001 		l3 = cachep->nodelists[node];
3002 		list_del(&slabp->list);
3003 		check_spinlock_acquired_node(cachep, node);
3004 		check_slabp(cachep, slabp);
3005 		slab_put_obj(cachep, slabp, objp, node);
3006 		STATS_DEC_ACTIVE(cachep);
3007 		l3->free_objects++;
3008 		check_slabp(cachep, slabp);
3009 
3010 		/* fixup slab chains */
3011 		if (slabp->inuse == 0) {
3012 			if (l3->free_objects > l3->free_limit) {
3013 				l3->free_objects -= cachep->num;
3014 				slab_destroy(cachep, slabp);
3015 			} else {
3016 				list_add(&slabp->list, &l3->slabs_free);
3017 			}
3018 		} else {
3019 			/* Unconditionally move a slab to the end of the
3020 			 * partial list on free - maximum time for the
3021 			 * other objects to be freed, too.
3022 			 */
3023 			list_add_tail(&slabp->list, &l3->slabs_partial);
3024 		}
3025 	}
3026 }
3027 
3028 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3029 {
3030 	int batchcount;
3031 	struct kmem_list3 *l3;
3032 	int node = numa_node_id();
3033 
3034 	batchcount = ac->batchcount;
3035 #if DEBUG
3036 	BUG_ON(!batchcount || batchcount > ac->avail);
3037 #endif
3038 	check_irq_off();
3039 	l3 = cachep->nodelists[node];
3040 	spin_lock(&l3->list_lock);
3041 	if (l3->shared) {
3042 		struct array_cache *shared_array = l3->shared;
3043 		int max = shared_array->limit - shared_array->avail;
3044 		if (max) {
3045 			if (batchcount > max)
3046 				batchcount = max;
3047 			memcpy(&(shared_array->entry[shared_array->avail]),
3048 			       ac->entry, sizeof(void *) * batchcount);
3049 			shared_array->avail += batchcount;
3050 			goto free_done;
3051 		}
3052 	}
3053 
3054 	free_block(cachep, ac->entry, batchcount, node);
3055 free_done:
3056 #if STATS
3057 	{
3058 		int i = 0;
3059 		struct list_head *p;
3060 
3061 		p = l3->slabs_free.next;
3062 		while (p != &(l3->slabs_free)) {
3063 			struct slab *slabp;
3064 
3065 			slabp = list_entry(p, struct slab, list);
3066 			BUG_ON(slabp->inuse);
3067 
3068 			i++;
3069 			p = p->next;
3070 		}
3071 		STATS_SET_FREEABLE(cachep, i);
3072 	}
3073 #endif
3074 	spin_unlock(&l3->list_lock);
3075 	ac->avail -= batchcount;
3076 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3077 }
3078 
3079 /*
3080  * Release an obj back to its cache. If the obj has a constructed state, it must
3081  * be in this state _before_ it is released.  Called with interrupts disabled.
3082  */
3083 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3084 {
3085 	struct array_cache *ac = cpu_cache_get(cachep);
3086 
3087 	check_irq_off();
3088 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3089 
3090 	/* Make sure we are not freeing an object from another
3091 	 * node to the array cache on this cpu.
3092 	 */
3093 #ifdef CONFIG_NUMA
3094 	{
3095 		struct slab *slabp;
3096 		slabp = virt_to_slab(objp);
3097 		if (unlikely(slabp->nodeid != numa_node_id())) {
3098 			struct array_cache *alien = NULL;
3099 			int nodeid = slabp->nodeid;
3100 			struct kmem_list3 *l3;
3101 
3102 			l3 = cachep->nodelists[numa_node_id()];
3103 			STATS_INC_NODEFREES(cachep);
3104 			if (l3->alien && l3->alien[nodeid]) {
3105 				alien = l3->alien[nodeid];
3106 				spin_lock(&alien->lock);
3107 				if (unlikely(alien->avail == alien->limit)) {
3108 					STATS_INC_ACOVERFLOW(cachep);
3109 					__drain_alien_cache(cachep,
3110 							    alien, nodeid);
3111 				}
3112 				alien->entry[alien->avail++] = objp;
3113 				spin_unlock(&alien->lock);
3114 			} else {
3115 				spin_lock(&(cachep->nodelists[nodeid])->
3116 					  list_lock);
3117 				free_block(cachep, &objp, 1, nodeid);
3118 				spin_unlock(&(cachep->nodelists[nodeid])->
3119 					    list_lock);
3120 			}
3121 			return;
3122 		}
3123 	}
3124 #endif
3125 	if (likely(ac->avail < ac->limit)) {
3126 		STATS_INC_FREEHIT(cachep);
3127 		ac->entry[ac->avail++] = objp;
3128 		return;
3129 	} else {
3130 		STATS_INC_FREEMISS(cachep);
3131 		cache_flusharray(cachep, ac);
3132 		ac->entry[ac->avail++] = objp;
3133 	}
3134 }
3135 
3136 /**
3137  * kmem_cache_alloc - Allocate an object
3138  * @cachep: The cache to allocate from.
3139  * @flags: See kmalloc().
3140  *
3141  * Allocate an object from this cache.  The flags are only relevant
3142  * if the cache has no available objects.
3143  */
3144 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3145 {
3146 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3147 }
3148 EXPORT_SYMBOL(kmem_cache_alloc);
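/*
 * Editorial sketch (not part of the original source; foo_cache and struct foo
 * are the hypothetical names used in the earlier kmem_cache_create() sketch):
 * the usual allocate/use/free cycle against a private cache.
 */
#if 0
static struct foo *foo_get(void)
{
	/* GFP_KERNEL only matters if the per-cpu array and slabs are empty */
	return kmem_cache_alloc(foo_cache, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
	/* the object must be back in its constructed state, see __cache_free() */
	kmem_cache_free(foo_cache, f);
}
#endif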
3149 
3150 /**
3151  * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3152  * @cache: The cache to allocate from.
3153  * @flags: See kmalloc().
3154  *
3155  * Allocate an object from this cache and set the allocated memory to zero.
3156  * The flags are only relevant if the cache has no available objects.
3157  */
3158 void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3159 {
3160 	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3161 	if (ret)
3162 		memset(ret, 0, obj_size(cache));
3163 	return ret;
3164 }
3165 EXPORT_SYMBOL(kmem_cache_zalloc);
3166 
3167 /**
3168  * kmem_ptr_validate - check if an untrusted pointer might
3169  *	be a slab entry.
3170  * @cachep: the cache we're checking against
3171  * @ptr: pointer to validate
3172  *
3173  * This verifies that the untrusted pointer looks sane:
3174  * it is _not_ a guarantee that the pointer is actually
3175  * part of the slab cache in question, but it at least
3176  * validates that the pointer can be dereferenced and
3177  * looks half-way sane.
3178  *
3179  * Currently only used for dentry validation.
3180  */
3181 int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
3182 {
3183 	unsigned long addr = (unsigned long)ptr;
3184 	unsigned long min_addr = PAGE_OFFSET;
3185 	unsigned long align_mask = BYTES_PER_WORD - 1;
3186 	unsigned long size = cachep->buffer_size;
3187 	struct page *page;
3188 
3189 	if (unlikely(addr < min_addr))
3190 		goto out;
3191 	if (unlikely(addr > (unsigned long)high_memory - size))
3192 		goto out;
3193 	if (unlikely(addr & align_mask))
3194 		goto out;
3195 	if (unlikely(!kern_addr_valid(addr)))
3196 		goto out;
3197 	if (unlikely(!kern_addr_valid(addr + size - 1)))
3198 		goto out;
3199 	page = virt_to_page(ptr);
3200 	if (unlikely(!PageSlab(page)))
3201 		goto out;
3202 	if (unlikely(page_get_cache(page) != cachep))
3203 		goto out;
3204 	return 1;
3205 out:
3206 	return 0;
3207 }
3208 
3209 #ifdef CONFIG_NUMA
3210 /**
3211  * kmem_cache_alloc_node - Allocate an object on the specified node
3212  * @cachep: The cache to allocate from.
3213  * @flags: See kmalloc().
3214  * @nodeid: node number of the target node.
3215  *
3216  * Identical to kmem_cache_alloc, except that this function is slow
3217  * and can sleep. It will allocate memory on the given node, which
3218  * can improve the performance for cpu-bound structures.
3219  * New and improved: it will now make sure that the object gets
3220  * put on the correct node list so that there is no false sharing.
3221  */
3222 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3223 {
3224 	unsigned long save_flags;
3225 	void *ptr;
3226 
3227 	cache_alloc_debugcheck_before(cachep, flags);
3228 	local_irq_save(save_flags);
3229 
3230 	if (nodeid == -1 || nodeid == numa_node_id() ||
3231 			!cachep->nodelists[nodeid])
3232 		ptr = ____cache_alloc(cachep, flags);
3233 	else
3234 		ptr = __cache_alloc_node(cachep, flags, nodeid);
3235 	local_irq_restore(save_flags);
3236 
3237 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
3238 					   __builtin_return_address(0));
3239 
3240 	return ptr;
3241 }
3242 EXPORT_SYMBOL(kmem_cache_alloc_node);
3243 
3244 void *kmalloc_node(size_t size, gfp_t flags, int node)
3245 {
3246 	struct kmem_cache *cachep;
3247 
3248 	cachep = kmem_find_general_cachep(size, flags);
3249 	if (unlikely(cachep == NULL))
3250 		return NULL;
3251 	return kmem_cache_alloc_node(cachep, flags, node);
3252 }
3253 EXPORT_SYMBOL(kmalloc_node);
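/*
 * Editorial sketch (not part of the original source): allocating memory close
 * to a known NUMA node, e.g. for a per-node control structure.
 */
#if 0
static void *example_node_buffer(int node)
{
	return kmalloc_node(1024, GFP_KERNEL, node);
}
#endif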
3254 #endif
3255 
3256 /**
3257  * kmalloc - allocate memory
3258  * @size: how many bytes of memory are required.
3259  * @flags: the type of memory to allocate.
3260  * @caller: function caller for debug tracking of the caller
3261  *
3262  * kmalloc is the normal method of allocating memory
3263  * in the kernel.
3264  *
3265  * The @flags argument may be one of:
3266  *
3267  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
3268  *
3269  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
3270  *
3271  * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
3272  *
3273  * Additionally, the %GFP_DMA flag may be set to indicate the memory
3274  * must be suitable for DMA.  This can mean different things on different
3275  * platforms.  For example, on i386, it means that the memory must come
3276  * from the first 16MB.
3277  */
3278 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3279 					  void *caller)
3280 {
3281 	struct kmem_cache *cachep;
3282 
3283 	/* If you want to save a few bytes .text space: replace
3284 	 * __ with kmem_.
3285 	 * Then kmalloc uses the uninlined functions instead of the inline
3286 	 * functions.
3287 	 */
3288 	cachep = __find_general_cachep(size, flags);
3289 	if (unlikely(cachep == NULL))
3290 		return NULL;
3291 	return __cache_alloc(cachep, flags, caller);
3292 }
3293 
3294 
3295 void *__kmalloc(size_t size, gfp_t flags)
3296 {
3297 #ifndef CONFIG_DEBUG_SLAB
3298 	return __do_kmalloc(size, flags, NULL);
3299 #else
3300 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3301 #endif
3302 }
3303 EXPORT_SYMBOL(__kmalloc);
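/*
 * Editorial sketch (not part of the original source): the common
 * kmalloc()/kfree() pairing described by the comment block above.
 */
#if 0
static int example_kmalloc_use(void)
{
	char *buf = kmalloc(128, GFP_KERNEL);	/* may sleep */

	if (!buf)
		return -ENOMEM;
	/* from interrupt context GFP_ATOMIC would be used instead */
	kfree(buf);
	return 0;
}
#endif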
3304 
3305 #ifdef CONFIG_DEBUG_SLAB
3306 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3307 {
3308 	return __do_kmalloc(size, flags, caller);
3309 }
3310 EXPORT_SYMBOL(__kmalloc_track_caller);
3311 #endif
3312 
3313 #ifdef CONFIG_SMP
3314 /**
3315  * __alloc_percpu - allocate one copy of the object for every possible
3316  * cpu in the system, zeroing them.
3317  * Objects should be dereferenced using the per_cpu_ptr macro only.
3318  *
3319  * @size: how many bytes of memory are required.
3320  */
3321 void *__alloc_percpu(size_t size)
3322 {
3323 	int i;
3324 	struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
3325 
3326 	if (!pdata)
3327 		return NULL;
3328 
3329 	/*
3330 	 * Cannot use for_each_online_cpu since a cpu may come online
3331 	 * and we have no way of figuring out how to fix the array
3332 	 * that we have already allocated.
3333 	 */
3334 	for_each_possible_cpu(i) {
3335 		int node = cpu_to_node(i);
3336 
3337 		if (node_online(node))
3338 			pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
3339 		else
3340 			pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
3341 
3342 		if (!pdata->ptrs[i])
3343 			goto unwind_oom;
3344 		memset(pdata->ptrs[i], 0, size);
3345 	}
3346 
3347 	/* Catch derefs w/o wrappers */
3348 	return (void *)(~(unsigned long)pdata);
3349 
3350 unwind_oom:
3351 	while (--i >= 0) {
3352 		if (!cpu_possible(i))
3353 			continue;
3354 		kfree(pdata->ptrs[i]);
3355 	}
3356 	kfree(pdata);
3357 	return NULL;
3358 }
3359 EXPORT_SYMBOL(__alloc_percpu);
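/*
 * Editorial sketch (not part of the original source), assuming the
 * alloc_percpu()/per_cpu_ptr()/free_percpu() wrappers from <linux/percpu.h>
 * of this era; the 'counters' variable is hypothetical.
 */
#if 0
static long *counters;

static int example_percpu(void)
{
	int cpu;

	counters = alloc_percpu(long);
	if (!counters)
		return -ENOMEM;
	cpu = get_cpu();
	(*per_cpu_ptr(counters, cpu))++;	/* touch this cpu's copy */
	put_cpu();
	free_percpu(counters);
	return 0;
}
#endif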
3360 #endif
3361 
3362 /**
3363  * kmem_cache_free - Deallocate an object
3364  * @cachep: The cache the allocation was from.
3365  * @objp: The previously allocated object.
3366  *
3367  * Free an object which was previously allocated from this
3368  * cache.
3369  */
3370 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3371 {
3372 	unsigned long flags;
3373 
3374 	local_irq_save(flags);
3375 	__cache_free(cachep, objp);
3376 	local_irq_restore(flags);
3377 }
3378 EXPORT_SYMBOL(kmem_cache_free);
3379 
3380 /**
3381  * kfree - free previously allocated memory
3382  * @objp: pointer returned by kmalloc.
3383  *
3384  * If @objp is NULL, no operation is performed.
3385  *
3386  * Don't free memory not originally allocated by kmalloc()
3387  * or you will run into trouble.
3388  */
3389 void kfree(const void *objp)
3390 {
3391 	struct kmem_cache *c;
3392 	unsigned long flags;
3393 
3394 	if (unlikely(!objp))
3395 		return;
3396 	local_irq_save(flags);
3397 	kfree_debugcheck(objp);
3398 	c = virt_to_cache(objp);
3399 	mutex_debug_check_no_locks_freed(objp, obj_size(c));
3400 	__cache_free(c, (void *)objp);
3401 	local_irq_restore(flags);
3402 }
3403 EXPORT_SYMBOL(kfree);
3404 
3405 #ifdef CONFIG_SMP
3406 /**
3407  * free_percpu - free previously allocated percpu memory
3408  * @objp: pointer returned by alloc_percpu.
3409  *
3410  * Don't free memory not originally allocated by alloc_percpu().
3411  * The complemented objp is used to check for that.
3412  */
3413 void free_percpu(const void *objp)
3414 {
3415 	int i;
3416 	struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
3417 
3418 	/*
3419 	 * We allocate for all possible cpus, so we cannot use for_each_online_cpu() here.
3420 	 */
3421 	for_each_possible_cpu(i)
3422 	    kfree(p->ptrs[i]);
3423 	kfree(p);
3424 }
3425 EXPORT_SYMBOL(free_percpu);
3426 #endif
3427 
3428 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3429 {
3430 	return obj_size(cachep);
3431 }
3432 EXPORT_SYMBOL(kmem_cache_size);
3433 
3434 const char *kmem_cache_name(struct kmem_cache *cachep)
3435 {
3436 	return cachep->name;
3437 }
3438 EXPORT_SYMBOL_GPL(kmem_cache_name);
3439 
3440 /*
3441  * This initializes kmem_list3 or resizes various caches for all nodes.
3442  */
3443 static int alloc_kmemlist(struct kmem_cache *cachep)
3444 {
3445 	int node;
3446 	struct kmem_list3 *l3;
3447 	struct array_cache *new_shared;
3448 	struct array_cache **new_alien;
3449 
3450 	for_each_online_node(node) {
3451 
3452 		new_alien = alloc_alien_cache(node, cachep->limit);
3453 		if (!new_alien)
3454 			goto fail;
3455 
3456 		new_shared = alloc_arraycache(node,
3457 				cachep->shared*cachep->batchcount,
3458 					0xbaadf00d);
3459 		if (!new_shared) {
3460 			free_alien_cache(new_alien);
3461 			goto fail;
3462 		}
3463 
3464 		l3 = cachep->nodelists[node];
3465 		if (l3) {
3466 			struct array_cache *shared = l3->shared;
3467 
3468 			spin_lock_irq(&l3->list_lock);
3469 
3470 			if (shared)
3471 				free_block(cachep, shared->entry,
3472 						shared->avail, node);
3473 
3474 			l3->shared = new_shared;
3475 			if (!l3->alien) {
3476 				l3->alien = new_alien;
3477 				new_alien = NULL;
3478 			}
3479 			l3->free_limit = (1 + nr_cpus_node(node)) *
3480 					cachep->batchcount + cachep->num;
3481 			spin_unlock_irq(&l3->list_lock);
3482 			kfree(shared);
3483 			free_alien_cache(new_alien);
3484 			continue;
3485 		}
3486 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3487 		if (!l3) {
3488 			free_alien_cache(new_alien);
3489 			kfree(new_shared);
3490 			goto fail;
3491 		}
3492 
3493 		kmem_list3_init(l3);
3494 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3495 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3496 		l3->shared = new_shared;
3497 		l3->alien = new_alien;
3498 		l3->free_limit = (1 + nr_cpus_node(node)) *
3499 					cachep->batchcount + cachep->num;
3500 		cachep->nodelists[node] = l3;
3501 	}
3502 	return 0;
3503 
3504 fail:
3505 	if (!cachep->next.next) {
3506 		/* Cache is not active yet. Roll back what we did */
3507 		node--;
3508 		while (node >= 0) {
3509 			if (cachep->nodelists[node]) {
3510 				l3 = cachep->nodelists[node];
3511 
3512 				kfree(l3->shared);
3513 				free_alien_cache(l3->alien);
3514 				kfree(l3);
3515 				cachep->nodelists[node] = NULL;
3516 			}
3517 			node--;
3518 		}
3519 	}
3520 	return -ENOMEM;
3521 }
3522 
3523 struct ccupdate_struct {
3524 	struct kmem_cache *cachep;
3525 	struct array_cache *new[NR_CPUS];
3526 };
3527 
3528 static void do_ccupdate_local(void *info)
3529 {
3530 	struct ccupdate_struct *new = info;
3531 	struct array_cache *old;
3532 
3533 	check_irq_off();
3534 	old = cpu_cache_get(new->cachep);
3535 
3536 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3537 	new->new[smp_processor_id()] = old;
3538 }
3539 
3540 /* Always called with the cache_chain_mutex held */
3541 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3542 				int batchcount, int shared)
3543 {
3544 	struct ccupdate_struct new;
3545 	int i, err;
3546 
3547 	memset(&new.new, 0, sizeof(new.new));
3548 	for_each_online_cpu(i) {
3549 		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
3550 						batchcount);
3551 		if (!new.new[i]) {
3552 			for (i--; i >= 0; i--)
3553 				kfree(new.new[i]);
3554 			return -ENOMEM;
3555 		}
3556 	}
3557 	new.cachep = cachep;
3558 
3559 	on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
3560 
3561 	check_irq_on();
3562 	cachep->batchcount = batchcount;
3563 	cachep->limit = limit;
3564 	cachep->shared = shared;
3565 
3566 	for_each_online_cpu(i) {
3567 		struct array_cache *ccold = new.new[i];
3568 		if (!ccold)
3569 			continue;
3570 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3571 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3572 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3573 		kfree(ccold);
3574 	}
3575 
3576 	err = alloc_kmemlist(cachep);
3577 	if (err) {
3578 		printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
3579 		       cachep->name, -err);
3580 		BUG();
3581 	}
3582 	return 0;
3583 }
3584 
3585 /* Called with cache_chain_mutex held always */
3586 static void enable_cpucache(struct kmem_cache *cachep)
3587 {
3588 	int err;
3589 	int limit, shared;
3590 
3591 	/*
3592 	 * The head array serves three purposes:
3593 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3594 	 * - reduce the number of spinlock operations.
3595 	 * - reduce the number of linked list operations on the slab and
3596 	 *   bufctl chains: array operations are cheaper.
3597 	 * The numbers are guessed; we should auto-tune as described by
3598 	 * Bonwick.
3599 	 */
3600 	if (cachep->buffer_size > 131072)
3601 		limit = 1;
3602 	else if (cachep->buffer_size > PAGE_SIZE)
3603 		limit = 8;
3604 	else if (cachep->buffer_size > 1024)
3605 		limit = 24;
3606 	else if (cachep->buffer_size > 256)
3607 		limit = 54;
3608 	else
3609 		limit = 120;
3610 
3611 	/*
3612 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3613 	 * allocation behaviour: Most allocs on one cpu, most free operations
3614 	 * on another cpu. For these cases, an efficient object passing between
3615 	 * cpus is necessary. This is provided by a shared array. The array
3616 	 * replaces Bonwick's magazine layer.
3617 	 * On uniprocessor, it's functionally equivalent (but less efficient)
3618 	 * to a larger limit. Thus disabled by default.
3619 	 */
3620 	shared = 0;
3621 #ifdef CONFIG_SMP
3622 	if (cachep->buffer_size <= PAGE_SIZE)
3623 		shared = 8;
3624 #endif
3625 
3626 #if DEBUG
3627 	/*
3628 	 * With debugging enabled, large batchcounts lead to excessively long
3629 	 * periods with local interrupts disabled. Limit the batchcount.
3630 	 */
3631 	if (limit > 32)
3632 		limit = 32;
3633 #endif
3634 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
3635 	if (err)
3636 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3637 		       cachep->name, -err);
3638 }
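
/*
 * Example (illustration only, not part of slab.c): a userspace sketch that
 * mirrors the limit heuristic above and the batchcount = (limit + 1) / 2
 * choice passed to do_tune_cpucache().  EXAMPLE_PAGE_SIZE and the
 * example_* names are made up here; the kernel uses the architecture's
 * real PAGE_SIZE.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096

static int example_pick_limit(unsigned long buffer_size)
{
	if (buffer_size > 131072)
		return 1;
	if (buffer_size > EXAMPLE_PAGE_SIZE)
		return 8;
	if (buffer_size > 1024)
		return 24;
	if (buffer_size > 256)
		return 54;
	return 120;
}

int main(void)
{
	unsigned long sizes[] = { 32, 192, 1024, 4096, 65536, 262144 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int limit = example_pick_limit(sizes[i]);

		/* batchcount is half the limit, rounded up */
		printf("size %6lu: limit %3d batchcount %3d\n",
		       sizes[i], limit, (limit + 1) / 2);
	}
	return 0;
}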
3639 
3640 /*
3641  * Drain an array if it contains any elements, taking the l3 lock only if
3642  * necessary. Note that the l3 list_lock also protects the array_cache
3643  * if drain_array() is used on the shared array.
3644  */
3645 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3646 			 struct array_cache *ac, int force, int node)
3647 {
3648 	int tofree;
3649 
3650 	if (!ac || !ac->avail)
3651 		return;
3652 	if (ac->touched && !force) {
3653 		ac->touched = 0;
3654 	} else {
3655 		spin_lock_irq(&l3->list_lock);
3656 		if (ac->avail) {
3657 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3658 			if (tofree > ac->avail)
3659 				tofree = (ac->avail + 1) / 2;
3660 			free_block(cachep, ac->entry, tofree, node);
3661 			ac->avail -= tofree;
3662 			memmove(ac->entry, &(ac->entry[tofree]),
3663 				sizeof(void *) * ac->avail);
3664 		}
3665 		spin_unlock_irq(&l3->list_lock);
3666 	}
3667 }
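
/*
 * Example (illustration only, not part of slab.c): the partial-drain
 * arithmetic used by drain_array() above.  A non-forced drain frees about
 * a fifth of the configured limit (rounded up), but never more than half
 * of the objects currently available; a forced drain frees everything.
 */
#include <stdio.h>

static int example_tofree(int limit, int avail, int force)
{
	int tofree = force ? avail : (limit + 4) / 5;

	if (tofree > avail)
		tofree = (avail + 1) / 2;
	return tofree;
}

int main(void)
{
	printf("%d\n", example_tofree(120, 100, 0));	/* (120 + 4) / 5 = 24 */
	printf("%d\n", example_tofree(120, 3, 0));	/* capped at (3 + 1) / 2 = 2 */
	printf("%d\n", example_tofree(120, 3, 1));	/* forced: all 3 */
	return 0;
}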
3668 
3669 /**
3670  * cache_reap - Reclaim memory from caches.
3671  * @unused: unused parameter
3672  *
3673  * Called from workqueue/eventd every few seconds.
3674  * Purpose:
3675  * - clear the per-cpu caches for this CPU.
3676  * - return freeable pages to the main free memory pool.
3677  *
3678  * If we cannot acquire the cache chain mutex then just give up - we'll try
3679  * again on the next iteration.
3680  */
3681 static void cache_reap(void *unused)
3682 {
3683 	struct list_head *walk;
3684 	struct kmem_list3 *l3;
3685 	int node = numa_node_id();
3686 
3687 	if (!mutex_trylock(&cache_chain_mutex)) {
3688 		/* Give up. Set up the next iteration. */
3689 		schedule_delayed_work(&__get_cpu_var(reap_work),
3690 				      REAPTIMEOUT_CPUC);
3691 		return;
3692 	}
3693 
3694 	list_for_each(walk, &cache_chain) {
3695 		struct kmem_cache *searchp;
3696 		struct list_head *p;
3697 		int tofree;
3698 		struct slab *slabp;
3699 
3700 		searchp = list_entry(walk, struct kmem_cache, next);
3701 		check_irq_on();
3702 
3703 		/*
3704 		 * We only take the l3 lock if absolutely necessary and we
3705 		 * have established with reasonable certainty that
3706 		 * we can do some work once the lock is obtained.
3707 		 */
3708 		l3 = searchp->nodelists[node];
3709 
3710 		reap_alien(searchp, l3);
3711 
3712 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
3713 
3714 		/*
3715 		 * These are racy checks but it does not matter
3716 		 * if we skip one check or scan twice.
3717 		 */
3718 		if (time_after(l3->next_reap, jiffies))
3719 			goto next;
3720 
3721 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
3722 
3723 		drain_array(searchp, l3, l3->shared, 0, node);
3724 
3725 		if (l3->free_touched) {
3726 			l3->free_touched = 0;
3727 			goto next;
3728 		}
3729 
3730 		tofree = (l3->free_limit + 5 * searchp->num - 1) /
3731 				(5 * searchp->num);
3732 		do {
3733 			/*
3734 			 * Do not lock if there are no free blocks.
3735 			 */
3736 			if (list_empty(&l3->slabs_free))
3737 				break;
3738 
3739 			spin_lock_irq(&l3->list_lock);
3740 			p = l3->slabs_free.next;
3741 			if (p == &(l3->slabs_free)) {
3742 				spin_unlock_irq(&l3->list_lock);
3743 				break;
3744 			}
3745 
3746 			slabp = list_entry(p, struct slab, list);
3747 			BUG_ON(slabp->inuse);
3748 			list_del(&slabp->list);
3749 			STATS_INC_REAPED(searchp);
3750 
3751 			/*
3752 			 * Safe to drop the lock. The slab is no longer linked
3753 			 * to the cache. searchp cannot disappear, we hold
3754 			 * cache_chain_mutex.
3755 			 */
3756 			l3->free_objects -= searchp->num;
3757 			spin_unlock_irq(&l3->list_lock);
3758 			slab_destroy(searchp, slabp);
3759 		} while (--tofree > 0);
3760 next:
3761 		cond_resched();
3762 	}
3763 	check_irq_on();
3764 	mutex_unlock(&cache_chain_mutex);
3765 	next_reap_node();
3766 	/* Set up the next iteration */
3767 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
3768 }
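
/*
 * Example (illustration only, not part of slab.c): the per-pass reclaim
 * budget computed by cache_reap() above is a ceiling division, i.e. at
 * most ceil(free_limit / (5 * num)) completely free slabs are destroyed
 * per pass, roughly a fifth of the allowed free objects in whole slabs.
 */
#include <stdio.h>

static int example_slabs_to_reap(int free_limit, int objs_per_slab)
{
	return (free_limit + 5 * objs_per_slab - 1) / (5 * objs_per_slab);
}

int main(void)
{
	/* e.g. free_limit of 244 objects, 16 objects per slab -> 4 slabs */
	printf("%d\n", example_slabs_to_reap(244, 16));
	return 0;
}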
3769 
3770 #ifdef CONFIG_PROC_FS
3771 
3772 static void print_slabinfo_header(struct seq_file *m)
3773 {
3774 	/*
3775 	 * Output format version, so at least we can change it
3776 	 * without _too_ many complaints.
3777 	 */
3778 #if STATS
3779 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
3780 #else
3781 	seq_puts(m, "slabinfo - version: 2.1\n");
3782 #endif
3783 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
3784 		 "<objperslab> <pagesperslab>");
3785 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
3786 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
3787 #if STATS
3788 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
3789 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
3790 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
3791 #endif
3792 	seq_putc(m, '\n');
3793 }
3794 
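/*
 * /proc/slabinfo seq_file iteration: s_start() takes cache_chain_mutex and
 * advances to the *pos'th cache on cache_chain (printing the header for
 * pos 0), s_next() steps to the following cache, and s_stop() drops the
 * mutex, so the chain cannot change while a read is in progress.
 */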
3795 static void *s_start(struct seq_file *m, loff_t *pos)
3796 {
3797 	loff_t n = *pos;
3798 	struct list_head *p;
3799 
3800 	mutex_lock(&cache_chain_mutex);
3801 	if (!n)
3802 		print_slabinfo_header(m);
3803 	p = cache_chain.next;
3804 	while (n--) {
3805 		p = p->next;
3806 		if (p == &cache_chain)
3807 			return NULL;
3808 	}
3809 	return list_entry(p, struct kmem_cache, next);
3810 }
3811 
3812 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3813 {
3814 	struct kmem_cache *cachep = p;
3815 	++*pos;
3816 	return cachep->next.next == &cache_chain ?
3817 		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
3818 }
3819 
3820 static void s_stop(struct seq_file *m, void *p)
3821 {
3822 	mutex_unlock(&cache_chain_mutex);
3823 }
3824 
3825 static int s_show(struct seq_file *m, void *p)
3826 {
3827 	struct kmem_cache *cachep = p;
3828 	struct list_head *q;
3829 	struct slab *slabp;
3830 	unsigned long active_objs;
3831 	unsigned long num_objs;
3832 	unsigned long active_slabs = 0;
3833 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
3834 	const char *name;
3835 	char *error = NULL;
3836 	int node;
3837 	struct kmem_list3 *l3;
3838 
3839 	active_objs = 0;
3840 	num_slabs = 0;
3841 	for_each_online_node(node) {
3842 		l3 = cachep->nodelists[node];
3843 		if (!l3)
3844 			continue;
3845 
3846 		check_irq_on();
3847 		spin_lock_irq(&l3->list_lock);
3848 
3849 		list_for_each(q, &l3->slabs_full) {
3850 			slabp = list_entry(q, struct slab, list);
3851 			if (slabp->inuse != cachep->num && !error)
3852 				error = "slabs_full accounting error";
3853 			active_objs += cachep->num;
3854 			active_slabs++;
3855 		}
3856 		list_for_each(q, &l3->slabs_partial) {
3857 			slabp = list_entry(q, struct slab, list);
3858 			if (slabp->inuse == cachep->num && !error)
3859 				error = "slabs_partial inuse accounting error";
3860 			if (!slabp->inuse && !error)
3861 				error = "slabs_partial/inuse accounting error";
3862 			active_objs += slabp->inuse;
3863 			active_slabs++;
3864 		}
3865 		list_for_each(q, &l3->slabs_free) {
3866 			slabp = list_entry(q, struct slab, list);
3867 			if (slabp->inuse && !error)
3868 				error = "slabs_free/inuse accounting error";
3869 			num_slabs++;
3870 		}
3871 		free_objects += l3->free_objects;
3872 		if (l3->shared)
3873 			shared_avail += l3->shared->avail;
3874 
3875 		spin_unlock_irq(&l3->list_lock);
3876 	}
3877 	num_slabs += active_slabs;
3878 	num_objs = num_slabs * cachep->num;
3879 	if (num_objs - active_objs != free_objects && !error)
3880 		error = "free_objects accounting error";
3881 
3882 	name = cachep->name;
3883 	if (error)
3884 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3885 
3886 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
3887 		   name, active_objs, num_objs, cachep->buffer_size,
3888 		   cachep->num, (1 << cachep->gfporder));
3889 	seq_printf(m, " : tunables %4u %4u %4u",
3890 		   cachep->limit, cachep->batchcount, cachep->shared);
3891 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
3892 		   active_slabs, num_slabs, shared_avail);
3893 #if STATS
3894 	{			/* list3 stats */
3895 		unsigned long high = cachep->high_mark;
3896 		unsigned long allocs = cachep->num_allocations;
3897 		unsigned long grown = cachep->grown;
3898 		unsigned long reaped = cachep->reaped;
3899 		unsigned long errors = cachep->errors;
3900 		unsigned long max_freeable = cachep->max_freeable;
3901 		unsigned long node_allocs = cachep->node_allocs;
3902 		unsigned long node_frees = cachep->node_frees;
3903 		unsigned long overflows = cachep->node_overflow;
3904 
3905 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
3906 				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
3907 				reaped, errors, max_freeable, node_allocs,
3908 				node_frees, overflows);
3909 	}
3910 	/* cpu stats */
3911 	{
3912 		unsigned long allochit = atomic_read(&cachep->allochit);
3913 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
3914 		unsigned long freehit = atomic_read(&cachep->freehit);
3915 		unsigned long freemiss = atomic_read(&cachep->freemiss);
3916 
3917 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
3918 			   allochit, allocmiss, freehit, freemiss);
3919 	}
3920 #endif
3921 	seq_putc(m, '\n');
3922 	return 0;
3923 }
3924 
3925 /*
3926  * slabinfo_op - iterator that generates /proc/slabinfo
3927  *
3928  * Output layout:
3929  * cache-name
3930  * num-active-objs
3931  * total-objs
3932  * object size
3933  * num-active-slabs
3934  * total-slabs
3935  * num-pages-per-slab
3936  * + further values on SMP and with statistics enabled
3937  */
3938 
3939 struct seq_operations slabinfo_op = {
3940 	.start = s_start,
3941 	.next = s_next,
3942 	.stop = s_stop,
3943 	.show = s_show,
3944 };
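
/*
 * Example (illustration only, not part of slab.c): a userspace sketch that
 * parses the leading per-cache fields emitted by s_show() above
 * ("%-17s %6lu %6lu %6u %4u %4d").  Lines starting with '#' and the
 * "slabinfo -" banner are skipped.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/slabinfo", "r");

	if (!f) {
		perror("/proc/slabinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char name[64];
		unsigned long active_objs, num_objs;
		unsigned int objsize, objperslab;
		int pagesperslab;

		if (line[0] == '#' || strncmp(line, "slabinfo", 8) == 0)
			continue;
		if (sscanf(line, "%63s %lu %lu %u %u %d", name, &active_objs,
			   &num_objs, &objsize, &objperslab,
			   &pagesperslab) == 6)
			printf("%s: %lu/%lu objects, %u bytes each\n",
			       name, active_objs, num_objs, objsize);
	}
	fclose(f);
	return 0;
}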
3945 
3946 #define MAX_SLABINFO_WRITE 128
3947 /**
3948  * slabinfo_write - Tuning for the slab allocator
3949  * @file: unused
3950  * @buffer: user buffer
3951  * @count: data length
3952  * @ppos: unused
3953  */
3954 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3955 		       size_t count, loff_t *ppos)
3956 {
3957 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
3958 	int limit, batchcount, shared, res;
3959 	struct list_head *p;
3960 
3961 	if (count > MAX_SLABINFO_WRITE)
3962 		return -EINVAL;
3963 	if (copy_from_user(&kbuf, buffer, count))
3964 		return -EFAULT;
3965 	kbuf[MAX_SLABINFO_WRITE] = '\0';
3966 
3967 	tmp = strchr(kbuf, ' ');
3968 	if (!tmp)
3969 		return -EINVAL;
3970 	*tmp = '\0';
3971 	tmp++;
3972 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
3973 		return -EINVAL;
3974 
3975 	/* Find the cache in the chain of caches. */
3976 	mutex_lock(&cache_chain_mutex);
3977 	res = -EINVAL;
3978 	list_for_each(p, &cache_chain) {
3979 		struct kmem_cache *cachep;
3980 
3981 		cachep = list_entry(p, struct kmem_cache, next);
3982 		if (!strcmp(cachep->name, kbuf)) {
3983 			if (limit < 1 || batchcount < 1 ||
3984 					batchcount > limit || shared < 0) {
3985 				res = 0;
3986 			} else {
3987 				res = do_tune_cpucache(cachep, limit,
3988 						       batchcount, shared);
3989 			}
3990 			break;
3991 		}
3992 	}
3993 	mutex_unlock(&cache_chain_mutex);
3994 	if (res >= 0)
3995 		res = count;
3996 	return res;
3997 }
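
/*
 * Example (illustration only, not part of slab.c): tuning a cache through
 * the interface implemented by slabinfo_write() above.  The cache name
 * "dentry_cache" and the values are only an illustration; writing to
 * /proc/slabinfo requires sufficient privileges, and batchcount must not
 * exceed limit.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("/proc/slabinfo");
		return 1;
	}
	/* parsed above as "<name> <limit> <batchcount> <shared>" */
	if (fprintf(f, "dentry_cache 120 60 8\n") < 0)
		perror("write");
	fclose(f);
	return 0;
}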
3998 
3999 #ifdef CONFIG_DEBUG_SLAB_LEAK
4000 
4001 static void *leaks_start(struct seq_file *m, loff_t *pos)
4002 {
4003 	loff_t n = *pos;
4004 	struct list_head *p;
4005 
4006 	mutex_lock(&cache_chain_mutex);
4007 	p = cache_chain.next;
4008 	while (n--) {
4009 		p = p->next;
4010 		if (p == &cache_chain)
4011 			return NULL;
4012 	}
4013 	return list_entry(p, struct kmem_cache, next);
4014 }
4015 
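/*
 * Scratch buffer layout used by the leak report below: n[0] holds the
 * capacity, n[1] the number of entries in use, followed by (caller
 * address, count) pairs kept sorted by address.  add_caller() binary
 * searches the pairs, bumps the count of a known caller or inserts a new
 * pair, and returns 0 once the buffer is full so leaks_show() can grow
 * the buffer and retry the entry.
 */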
4016 static inline int add_caller(unsigned long *n, unsigned long v)
4017 {
4018 	unsigned long *p;
4019 	int l;
4020 	if (!v)
4021 		return 1;
4022 	l = n[1];
4023 	p = n + 2;
4024 	while (l) {
4025 		int i = l/2;
4026 		unsigned long *q = p + 2 * i;
4027 		if (*q == v) {
4028 			q[1]++;
4029 			return 1;
4030 		}
4031 		if (*q > v) {
4032 			l = i;
4033 		} else {
4034 			p = q + 2;
4035 			l -= i + 1;
4036 		}
4037 	}
4038 	if (++n[1] == n[0])
4039 		return 0;
4040 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4041 	p[0] = v;
4042 	p[1] = 1;
4043 	return 1;
4044 }
4045 
4046 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4047 {
4048 	void *p;
4049 	int i;
4050 	if (n[0] == n[1])
4051 		return;
4052 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4053 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4054 			continue;
4055 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4056 			return;
4057 	}
4058 }
4059 
4060 static void show_symbol(struct seq_file *m, unsigned long address)
4061 {
4062 #ifdef CONFIG_KALLSYMS
4063 	char *modname;
4064 	const char *name;
4065 	unsigned long offset, size;
4066 	char namebuf[KSYM_NAME_LEN+1];
4067 
4068 	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
4069 
4070 	if (name) {
4071 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4072 		if (modname)
4073 			seq_printf(m, " [%s]", modname);
4074 		return;
4075 	}
4076 #endif
4077 	seq_printf(m, "%p", (void *)address);
4078 }
4079 
4080 static int leaks_show(struct seq_file *m, void *p)
4081 {
4082 	struct kmem_cache *cachep = p;
4083 	struct list_head *q;
4084 	struct slab *slabp;
4085 	struct kmem_list3 *l3;
4086 	const char *name;
4087 	unsigned long *n = m->private;
4088 	int node;
4089 	int i;
4090 
4091 	if (!(cachep->flags & SLAB_STORE_USER))
4092 		return 0;
4093 	if (!(cachep->flags & SLAB_RED_ZONE))
4094 		return 0;
4095 
4096 	/* OK, we can do it */
4097 
4098 	n[1] = 0;
4099 
4100 	for_each_online_node(node) {
4101 		l3 = cachep->nodelists[node];
4102 		if (!l3)
4103 			continue;
4104 
4105 		check_irq_on();
4106 		spin_lock_irq(&l3->list_lock);
4107 
4108 		list_for_each(q, &l3->slabs_full) {
4109 			slabp = list_entry(q, struct slab, list);
4110 			handle_slab(n, cachep, slabp);
4111 		}
4112 		list_for_each(q, &l3->slabs_partial) {
4113 			slabp = list_entry(q, struct slab, list);
4114 			handle_slab(n, cachep, slabp);
4115 		}
4116 		spin_unlock_irq(&l3->list_lock);
4117 	}
4118 	name = cachep->name;
4119 	if (n[0] == n[1]) {
4120 		/* Increase the buffer size */
4121 		mutex_unlock(&cache_chain_mutex);
4122 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4123 		if (!m->private) {
4124 			/* Too bad, we are really out */
4125 			m->private = n;
4126 			mutex_lock(&cache_chain_mutex);
4127 			return -ENOMEM;
4128 		}
4129 		*(unsigned long *)m->private = n[0] * 2;
4130 		kfree(n);
4131 		mutex_lock(&cache_chain_mutex);
4132 		/* Now make sure this entry will be retried */
4133 		m->count = m->size;
4134 		return 0;
4135 	}
4136 	for (i = 0; i < n[1]; i++) {
4137 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4138 		show_symbol(m, n[2*i+2]);
4139 		seq_putc(m, '\n');
4140 	}
4141 	return 0;
4142 }
4143 
4144 struct seq_operations slabstats_op = {
4145 	.start = leaks_start,
4146 	.next = s_next,
4147 	.stop = s_stop,
4148 	.show = leaks_show,
4149 };
4150 #endif
4151 #endif
4152 
4153 /**
4154  * ksize - get the actual amount of memory allocated for a given object
4155  * @objp: Pointer to the object
4156  *
4157  * kmalloc may internally round up allocations and return more memory
4158  * than requested. ksize() can be used to determine the actual amount of
4159  * memory allocated. The caller may use this additional memory, even though
4160  * a smaller amount of memory was initially specified with the kmalloc call.
4161  * The caller must guarantee that objp points to a valid object previously
4162  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4163  * must not be freed for the duration of the call.
4164  */
4165 unsigned int ksize(const void *objp)
4166 {
4167 	if (unlikely(objp == NULL))
4168 		return 0;
4169 
4170 	return obj_size(virt_to_cache(objp));
4171 }
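
/*
 * Example (illustration only, not part of slab.c): in-kernel use of
 * ksize().  A minimal sketch assuming ordinary process context; the
 * helper name example_use_ksize() is made up.  kmalloc() may round the
 * request up to the next size class, and the caller may use the full
 * ksize() bytes.
 */
static int example_use_ksize(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);
	unsigned int real;

	if (!buf)
		return -ENOMEM;
	real = ksize(buf);	/* >= 100, the size actually allocated */
	memset(buf, 0, real);	/* the whole allocation is usable */
	kfree(buf);
	return 0;
}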
4172