xref: /openbmc/linux/mm/slab.c (revision c21b37f6)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means, that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
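/*
 * Illustrative usage sketch (not part of this file; "struct foo", foo_ctor
 * and foo_cachep are made-up names).  A typical client creates one cache per
 * object type and allocates/frees through it, returning objects in the same
 * initialized state in which the constructor left them:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor);
 *	objp = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, objp);
 */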
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/seq_file.h>
99 #include	<linux/notifier.h>
100 #include	<linux/kallsyms.h>
101 #include	<linux/cpu.h>
102 #include	<linux/sysctl.h>
103 #include	<linux/module.h>
104 #include	<linux/rcupdate.h>
105 #include	<linux/string.h>
106 #include	<linux/uaccess.h>
107 #include	<linux/nodemask.h>
108 #include	<linux/mempolicy.h>
109 #include	<linux/mutex.h>
110 #include	<linux/fault-inject.h>
111 #include	<linux/rtmutex.h>
112 #include	<linux/reciprocal_div.h>
113 
114 #include	<asm/cacheflush.h>
115 #include	<asm/tlbflush.h>
116 #include	<asm/page.h>
117 
118 /*
119  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
120  *		  0 for faster, smaller code (especially in the critical paths).
121  *
122  * STATS	- 1 to collect stats for /proc/slabinfo.
123  *		  0 for faster, smaller code (especially in the critical paths).
124  *
125  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
126  */
127 
128 #ifdef CONFIG_DEBUG_SLAB
129 #define	DEBUG		1
130 #define	STATS		1
131 #define	FORCED_DEBUG	1
132 #else
133 #define	DEBUG		0
134 #define	STATS		0
135 #define	FORCED_DEBUG	0
136 #endif
137 
138 /* Shouldn't this be in a header file somewhere? */
139 #define	BYTES_PER_WORD		sizeof(void *)
140 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
141 
142 #ifndef cache_line_size
143 #define cache_line_size()	L1_CACHE_BYTES
144 #endif
145 
146 #ifndef ARCH_KMALLOC_MINALIGN
147 /*
148  * Enforce a minimum alignment for the kmalloc caches.
149  * Usually, the kmalloc caches are cache_line_size() aligned, except when
150  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
151  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
152  * alignment larger than the alignment of a 64-bit integer.
153  * ARCH_KMALLOC_MINALIGN allows that.
154  * Note that increasing this value may disable some debug features.
155  */
156 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
157 #endif
158 
159 #ifndef ARCH_SLAB_MINALIGN
160 /*
161  * Enforce a minimum alignment for all caches.
162  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
163  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
164  * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
165  * some debug features.
166  */
167 #define ARCH_SLAB_MINALIGN 0
168 #endif
169 
170 #ifndef ARCH_KMALLOC_FLAGS
171 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
172 #endif
173 
174 /* Legal flag mask for kmem_cache_create(). */
175 #if DEBUG
176 # define CREATE_MASK	(SLAB_RED_ZONE | \
177 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
178 			 SLAB_CACHE_DMA | \
179 			 SLAB_STORE_USER | \
180 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
181 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
182 #else
183 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
184 			 SLAB_CACHE_DMA | \
185 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
186 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
187 #endif
188 
189 /*
190  * kmem_bufctl_t:
191  *
192  * Bufctl's are used for linking objs within a slab via
193  * linked offsets.
194  *
195  * This implementation relies on "struct page" for locating the cache &
196  * slab an object belongs to.
197  * This allows the bufctl structure to be small (one int), but limits
198  * the number of objects a slab (not a cache) can contain when off-slab
199  * bufctls are used. The limit is the size of the largest general cache
200  * that does not use off-slab slabs.
201  * For 32-bit archs with 4 kB pages, this is 56.
202  * This is not serious, as it is only for large objects, when it is unwise
203  * to have too many per slab.
204  * Note: This limit can be raised by introducing a general cache whose size
205  * is less than 512 (PAGE_SIZE>>3), but greater than 256.
206  */
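/*
 * Rough sketch of where the "56" above comes from, assuming a 32-bit arch
 * with 4 kB pages and sizeof(struct slab) rounding up to 32 bytes:
 *
 *	caches with objects >= PAGE_SIZE >> 3 (512) use off-slab management,
 *	so the largest on-slab general cache is the 256-byte one; the number
 *	of bufctls that fit next to a struct slab in such an allocation is
 *		(256 - 32) / sizeof(kmem_bufctl_t) = (256 - 32) / 4 = 56
 */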
207 
208 typedef unsigned int kmem_bufctl_t;
209 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
210 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
211 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
212 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
213 
214 /*
215  * struct slab
216  *
217  * Manages the objs in a slab. Placed either at the beginning of mem allocated
218  * for a slab, or allocated from a general cache.
219  * Slabs are chained into three lists: fully used, partial, fully free slabs.
220  */
221 struct slab {
222 	struct list_head list;
223 	unsigned long colouroff;
224 	void *s_mem;		/* including colour offset */
225 	unsigned int inuse;	/* num of objs active in slab */
226 	kmem_bufctl_t free;
227 	unsigned short nodeid;
228 };
229 
230 /*
231  * struct slab_rcu
232  *
233  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
234  * arrange for kmem_freepages to be called via RCU.  This is useful if
235  * we need to approach a kernel structure obliquely, from its address
236  * obtained without the usual locking.  We can lock the structure to
237  * stabilize it and check it's still at the given address, only if we
238  * can be sure that the memory has not been meanwhile reused for some
239  * other kind of object (which our subsystem's lock might corrupt).
240  *
241  * rcu_read_lock before reading the address, then rcu_read_unlock after
242  * taking the spinlock within the structure expected at that address.
243  *
244  * We assume struct slab_rcu can overlay struct slab when destroying.
245  */
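/*
 * Sketch of the lookup pattern described above (illustrative only; "obj"
 * and "obj->lock" are stand-ins for whatever the subsystem actually uses):
 *
 *	rcu_read_lock();
 *	obj = <pointer obtained without the usual locking>;
 *	spin_lock(&obj->lock);
 *	rcu_read_unlock();
 *	if (<obj is still the object expected at that address>)
 *		<use obj>;
 *	spin_unlock(&obj->lock);
 *
 * SLAB_DESTROY_BY_RCU only guarantees that the memory stays an object of
 * this cache until the RCU grace period ends; the re-check after taking the
 * lock is still required.
 */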
246 struct slab_rcu {
247 	struct rcu_head head;
248 	struct kmem_cache *cachep;
249 	void *addr;
250 };
251 
252 /*
253  * struct array_cache
254  *
255  * Purpose:
256  * - LIFO ordering, to hand out cache-warm objects from _alloc
257  * - reduce the number of linked list operations
258  * - reduce spinlock operations
259  *
260  * The limit is stored in the per-cpu structure to reduce the data cache
261  * footprint.
262  *
263  */
264 struct array_cache {
265 	unsigned int avail;
266 	unsigned int limit;
267 	unsigned int batchcount;
268 	unsigned int touched;
269 	spinlock_t lock;
270 	void *entry[0];	/*
271 			 * Must have this definition in here for the proper
272 			 * alignment of array_cache. Also simplifies accessing
273 			 * the entries.
274 			 * [0] is for gcc 2.95. It should really be [].
275 			 */
276 };
277 
278 /*
279  * bootstrap: The caches do not work without cpuarrays anymore, but the
280  * cpuarrays are allocated from the generic caches...
281  */
282 #define BOOT_CPUCACHE_ENTRIES	1
283 struct arraycache_init {
284 	struct array_cache cache;
285 	void *entries[BOOT_CPUCACHE_ENTRIES];
286 };
287 
288 /*
289  * The slab lists for all objects.
290  */
291 struct kmem_list3 {
292 	struct list_head slabs_partial;	/* partial list first, better asm code */
293 	struct list_head slabs_full;
294 	struct list_head slabs_free;
295 	unsigned long free_objects;
296 	unsigned int free_limit;
297 	unsigned int colour_next;	/* Per-node cache coloring */
298 	spinlock_t list_lock;
299 	struct array_cache *shared;	/* shared per node */
300 	struct array_cache **alien;	/* on other nodes */
301 	unsigned long next_reap;	/* updated without locking */
302 	int free_touched;		/* updated without locking */
303 };
304 
305 /*
306  * Need this for bootstrapping a per node allocator.
307  */
308 #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
309 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
310 #define	CACHE_CACHE 0
311 #define	SIZE_AC 1
312 #define	SIZE_L3 (1 + MAX_NUMNODES)
313 
314 static int drain_freelist(struct kmem_cache *cache,
315 			struct kmem_list3 *l3, int tofree);
316 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
317 			int node);
318 static int enable_cpucache(struct kmem_cache *cachep);
319 static void cache_reap(struct work_struct *unused);
320 
321 /*
322  * This function must be completely optimized away if a constant is passed to
323  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
324  */
325 static __always_inline int index_of(const size_t size)
326 {
327 	extern void __bad_size(void);
328 
329 	if (__builtin_constant_p(size)) {
330 		int i = 0;
331 
332 #define CACHE(x) \
333 	if (size <=x) \
334 		return i; \
335 	else \
336 		i++;
337 #include "linux/kmalloc_sizes.h"
338 #undef CACHE
339 		__bad_size();
340 	} else
341 		__bad_size();
342 	return 0;
343 }
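/*
 * For example, assuming the usual <linux/kmalloc_sizes.h> table (32, 64, 96,
 * 128, ... bytes), index_of(100) folds at compile time to the index of the
 * 128-byte cache; INDEX_AC and INDEX_L3 below rely on exactly this.  A
 * non-constant size leaves the call to the undefined __bad_size() in place
 * and therefore fails at link time.
 */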
344 
345 static int slab_early_init = 1;
346 
347 #define INDEX_AC index_of(sizeof(struct arraycache_init))
348 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
349 
350 static void kmem_list3_init(struct kmem_list3 *parent)
351 {
352 	INIT_LIST_HEAD(&parent->slabs_full);
353 	INIT_LIST_HEAD(&parent->slabs_partial);
354 	INIT_LIST_HEAD(&parent->slabs_free);
355 	parent->shared = NULL;
356 	parent->alien = NULL;
357 	parent->colour_next = 0;
358 	spin_lock_init(&parent->list_lock);
359 	parent->free_objects = 0;
360 	parent->free_touched = 0;
361 }
362 
363 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
364 	do {								\
365 		INIT_LIST_HEAD(listp);					\
366 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
367 	} while (0)
368 
369 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
370 	do {								\
371 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
372 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
373 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
374 	} while (0)
375 
376 /*
377  * struct kmem_cache
378  *
379  * manages a cache.
380  */
381 
382 struct kmem_cache {
383 /* 1) per-cpu data, touched during every alloc/free */
384 	struct array_cache *array[NR_CPUS];
385 /* 2) Cache tunables. Protected by cache_chain_mutex */
386 	unsigned int batchcount;
387 	unsigned int limit;
388 	unsigned int shared;
389 
390 	unsigned int buffer_size;
391 	u32 reciprocal_buffer_size;
392 /* 3) touched by every alloc & free from the backend */
393 
394 	unsigned int flags;		/* constant flags */
395 	unsigned int num;		/* # of objs per slab */
396 
397 /* 4) cache_grow/shrink */
398 	/* order of pgs per slab (2^n) */
399 	unsigned int gfporder;
400 
401 	/* force GFP flags, e.g. GFP_DMA */
402 	gfp_t gfpflags;
403 
404 	size_t colour;			/* cache colouring range */
405 	unsigned int colour_off;	/* colour offset */
406 	struct kmem_cache *slabp_cache;
407 	unsigned int slab_size;
408 	unsigned int dflags;		/* dynamic flags */
409 
410 	/* constructor func */
411 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
412 
413 /* 5) cache creation/removal */
414 	const char *name;
415 	struct list_head next;
416 
417 /* 6) statistics */
418 #if STATS
419 	unsigned long num_active;
420 	unsigned long num_allocations;
421 	unsigned long high_mark;
422 	unsigned long grown;
423 	unsigned long reaped;
424 	unsigned long errors;
425 	unsigned long max_freeable;
426 	unsigned long node_allocs;
427 	unsigned long node_frees;
428 	unsigned long node_overflow;
429 	atomic_t allochit;
430 	atomic_t allocmiss;
431 	atomic_t freehit;
432 	atomic_t freemiss;
433 #endif
434 #if DEBUG
435 	/*
436 	 * If debugging is enabled, then the allocator can add additional
437 	 * fields and/or padding to every object. buffer_size contains the total
438 	 * object size including these internal fields, the following two
439 	 * variables contain the offset to the user object and its size.
440 	 */
441 	int obj_offset;
442 	int obj_size;
443 #endif
444 	/*
445 	 * We put nodelists[] at the end of kmem_cache, because we want to size
446 	 * this array to nr_node_ids slots instead of MAX_NUMNODES
447 	 * (see kmem_cache_init())
448 	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
449 	 * is statically defined, so we reserve the max number of nodes.
450 	 */
451 	struct kmem_list3 *nodelists[MAX_NUMNODES];
452 	/*
453 	 * Do not add fields after nodelists[]
454 	 */
455 };
456 
457 #define CFLGS_OFF_SLAB		(0x80000000UL)
458 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
459 
460 #define BATCHREFILL_LIMIT	16
461 /*
462  * Optimization question: fewer reaps means less probability for unnecessary
463  * cpucache drain/refill cycles.
464  *
465  * OTOH the cpuarrays can contain lots of objects,
466  * which could lock up otherwise freeable slabs.
467  */
468 #define REAPTIMEOUT_CPUC	(2*HZ)
469 #define REAPTIMEOUT_LIST3	(4*HZ)
470 
471 #if STATS
472 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
473 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
474 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
475 #define	STATS_INC_GROWN(x)	((x)->grown++)
476 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
477 #define	STATS_SET_HIGH(x)						\
478 	do {								\
479 		if ((x)->num_active > (x)->high_mark)			\
480 			(x)->high_mark = (x)->num_active;		\
481 	} while (0)
482 #define	STATS_INC_ERR(x)	((x)->errors++)
483 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
484 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
485 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
486 #define	STATS_SET_FREEABLE(x, i)					\
487 	do {								\
488 		if ((x)->max_freeable < i)				\
489 			(x)->max_freeable = i;				\
490 	} while (0)
491 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
492 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
493 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
494 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
495 #else
496 #define	STATS_INC_ACTIVE(x)	do { } while (0)
497 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
498 #define	STATS_INC_ALLOCED(x)	do { } while (0)
499 #define	STATS_INC_GROWN(x)	do { } while (0)
500 #define	STATS_ADD_REAPED(x,y)	do { } while (0)
501 #define	STATS_SET_HIGH(x)	do { } while (0)
502 #define	STATS_INC_ERR(x)	do { } while (0)
503 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
504 #define	STATS_INC_NODEFREES(x)	do { } while (0)
505 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
506 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
507 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
508 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
509 #define STATS_INC_FREEHIT(x)	do { } while (0)
510 #define STATS_INC_FREEMISS(x)	do { } while (0)
511 #endif
512 
513 #if DEBUG
514 
515 /*
516  * memory layout of objects:
517  * 0		: objp
518  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
519  * 		the end of an object is aligned with the end of the real
520  * 		allocation. Catches writes behind the end of the allocation.
521  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
522  * 		redzone word.
523  * cachep->obj_offset: The real object.
524  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
525  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
526  *					[BYTES_PER_WORD long]
527  */
528 static int obj_offset(struct kmem_cache *cachep)
529 {
530 	return cachep->obj_offset;
531 }
532 
533 static int obj_size(struct kmem_cache *cachep)
534 {
535 	return cachep->obj_size;
536 }
537 
538 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
539 {
540 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
541 	return (unsigned long long*) (objp + obj_offset(cachep) -
542 				      sizeof(unsigned long long));
543 }
544 
545 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
546 {
547 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
548 	if (cachep->flags & SLAB_STORE_USER)
549 		return (unsigned long long *)(objp + cachep->buffer_size -
550 					      sizeof(unsigned long long) -
551 					      REDZONE_ALIGN);
552 	return (unsigned long long *) (objp + cachep->buffer_size -
553 				       sizeof(unsigned long long));
554 }
555 
556 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
557 {
558 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
559 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
560 }
561 
562 #else
563 
564 #define obj_offset(x)			0
565 #define obj_size(cachep)		(cachep->buffer_size)
566 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
567 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
568 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
569 
570 #endif
571 
572 /*
573  * Do not go above this order unless 0 objects fit into the slab.
574  */
575 #define	BREAK_GFP_ORDER_HI	1
576 #define	BREAK_GFP_ORDER_LO	0
577 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
578 
579 /*
580  * Functions for storing/retrieving the cachep and/or slab from the page
581  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
582  * these are used to find the cache which an obj belongs to.
583  */
584 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
585 {
586 	page->lru.next = (struct list_head *)cache;
587 }
588 
589 static inline struct kmem_cache *page_get_cache(struct page *page)
590 {
591 	page = compound_head(page);
592 	BUG_ON(!PageSlab(page));
593 	return (struct kmem_cache *)page->lru.next;
594 }
595 
596 static inline void page_set_slab(struct page *page, struct slab *slab)
597 {
598 	page->lru.prev = (struct list_head *)slab;
599 }
600 
601 static inline struct slab *page_get_slab(struct page *page)
602 {
603 	BUG_ON(!PageSlab(page));
604 	return (struct slab *)page->lru.prev;
605 }
606 
607 static inline struct kmem_cache *virt_to_cache(const void *obj)
608 {
609 	struct page *page = virt_to_head_page(obj);
610 	return page_get_cache(page);
611 }
612 
613 static inline struct slab *virt_to_slab(const void *obj)
614 {
615 	struct page *page = virt_to_head_page(obj);
616 	return page_get_slab(page);
617 }
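/*
 * Taken together, these helpers are what let kfree() and kmem_cache_free()
 * go from a bare object pointer back to its cache and slab: kmem_getpages()
 * marks the backing pages PageSlab, and the cache growth path records the
 * owning cache and slab in page->lru via page_set_cache()/page_set_slab().
 */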
618 
619 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
620 				 unsigned int idx)
621 {
622 	return slab->s_mem + cache->buffer_size * idx;
623 }
624 
625 /*
626  * We want to avoid an expensive divide : (offset / cache->buffer_size)
627  *   Using the fact that buffer_size is a constant for a particular cache,
628  *   we can replace (offset / cache->buffer_size) by
629  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
630  */
631 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
632 					const struct slab *slab, void *obj)
633 {
634 	u32 offset = (obj - slab->s_mem);
635 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
636 }
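/*
 * The identity relied on here (see lib/reciprocal_div.c at this point in
 * time) is:
 *
 *	reciprocal_value(size)    = ((1ULL << 32) + size - 1) / size
 *	reciprocal_divide(off, r) = (u32)(((u64)off * r) >> 32)
 *
 * which yields exactly off / size for the offsets used here, since off is
 * always a multiple of buffer_size and smaller than 2^32.
 */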
637 
638 /*
639  * These are the default caches for kmalloc. Custom caches can have other sizes.
640  */
641 struct cache_sizes malloc_sizes[] = {
642 #define CACHE(x) { .cs_size = (x) },
643 #include <linux/kmalloc_sizes.h>
644 	CACHE(ULONG_MAX)
645 #undef CACHE
646 };
647 EXPORT_SYMBOL(malloc_sizes);
648 
649 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
650 struct cache_names {
651 	char *name;
652 	char *name_dma;
653 };
654 
655 static struct cache_names __initdata cache_names[] = {
656 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
657 #include <linux/kmalloc_sizes.h>
658 	{NULL,}
659 #undef CACHE
660 };
661 
662 static struct arraycache_init initarray_cache __initdata =
663     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
664 static struct arraycache_init initarray_generic =
665     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
666 
667 /* internal cache of cache description objs */
668 static struct kmem_cache cache_cache = {
669 	.batchcount = 1,
670 	.limit = BOOT_CPUCACHE_ENTRIES,
671 	.shared = 1,
672 	.buffer_size = sizeof(struct kmem_cache),
673 	.name = "kmem_cache",
674 };
675 
676 #define BAD_ALIEN_MAGIC 0x01020304ul
677 
678 #ifdef CONFIG_LOCKDEP
679 
680 /*
681  * Slab sometimes uses the kmalloc slabs to store the slab headers
682  * for other slabs "off slab".
683  * The locking for this is tricky in that it nests within the locks
684  * of all other slabs in a few places; to deal with this special
685  * locking we put on-slab caches into a separate lock-class.
686  *
687  * We set lock class for alien array caches which are up during init.
688  * The lock annotation will be lost if all cpus of a node go down and
689  * then come back up during hotplug.
690  */
691 static struct lock_class_key on_slab_l3_key;
692 static struct lock_class_key on_slab_alc_key;
693 
694 static inline void init_lock_keys(void)
695 
696 {
697 	int q;
698 	struct cache_sizes *s = malloc_sizes;
699 
700 	while (s->cs_size != ULONG_MAX) {
701 		for_each_node(q) {
702 			struct array_cache **alc;
703 			int r;
704 			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
705 			if (!l3 || OFF_SLAB(s->cs_cachep))
706 				continue;
707 			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
708 			alc = l3->alien;
709 			/*
710 			 * FIXME: This check for BAD_ALIEN_MAGIC
711 			 * should go away when common slab code is taught to
712 			 * work even without alien caches.
713 			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
714 			 * for alloc_alien_cache,
715 			 */
716 			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
717 				continue;
718 			for_each_node(r) {
719 				if (alc[r])
720 					lockdep_set_class(&alc[r]->lock,
721 					     &on_slab_alc_key);
722 			}
723 		}
724 		s++;
725 	}
726 }
727 #else
728 static inline void init_lock_keys(void)
729 {
730 }
731 #endif
732 
733 /*
734  * 1. Guard access to the cache-chain.
735  * 2. Protect sanity of cpu_online_map against cpu hotplug events
736  */
737 static DEFINE_MUTEX(cache_chain_mutex);
738 static struct list_head cache_chain;
739 
740 /*
741  * chicken and egg problem: delay the per-cpu array allocation
742  * until the general caches are up.
743  */
744 static enum {
745 	NONE,
746 	PARTIAL_AC,
747 	PARTIAL_L3,
748 	FULL
749 } g_cpucache_up;
750 
751 /*
752  * used by boot code to determine if it can use slab based allocator
753  */
754 int slab_is_available(void)
755 {
756 	return g_cpucache_up == FULL;
757 }
758 
759 static DEFINE_PER_CPU(struct delayed_work, reap_work);
760 
761 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
762 {
763 	return cachep->array[smp_processor_id()];
764 }
765 
766 static inline struct kmem_cache *__find_general_cachep(size_t size,
767 							gfp_t gfpflags)
768 {
769 	struct cache_sizes *csizep = malloc_sizes;
770 
771 #if DEBUG
772 	/* This happens if someone tries to call
773 	 * kmem_cache_create(), or __kmalloc(), before
774 	 * the generic caches are initialized.
775 	 */
776 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
777 #endif
778 	if (!size)
779 		return ZERO_SIZE_PTR;
780 
781 	while (size > csizep->cs_size)
782 		csizep++;
783 
784 	/*
785 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
786 	 * has cs_{dma,}cachep==NULL. Thus no special case
787 	 * for large kmalloc calls required.
788 	 */
789 #ifdef CONFIG_ZONE_DMA
790 	if (unlikely(gfpflags & GFP_DMA))
791 		return csizep->cs_dmacachep;
792 #endif
793 	return csizep->cs_cachep;
794 }
795 
796 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
797 {
798 	return __find_general_cachep(size, gfpflags);
799 }
800 
801 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
802 {
803 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
804 }
805 
806 /*
807  * Calculate the number of objects and left-over bytes for a given buffer size.
808  */
809 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
810 			   size_t align, int flags, size_t *left_over,
811 			   unsigned int *num)
812 {
813 	int nr_objs;
814 	size_t mgmt_size;
815 	size_t slab_size = PAGE_SIZE << gfporder;
816 
817 	/*
818 	 * The slab management structure can be either off the slab or
819 	 * on it. For the latter case, the memory allocated for a
820 	 * slab is used for:
821 	 *
822 	 * - The struct slab
823 	 * - One kmem_bufctl_t for each object
824 	 * - Padding to respect alignment of @align
825 	 * - @buffer_size bytes for each object
826 	 *
827 	 * If the slab management structure is off the slab, then the
828 	 * alignment will already be calculated into the size. Because
829 	 * the slabs are all pages aligned, the objects will be at the
830 	 * correct alignment when allocated.
831 	 */
832 	if (flags & CFLGS_OFF_SLAB) {
833 		mgmt_size = 0;
834 		nr_objs = slab_size / buffer_size;
835 
836 		if (nr_objs > SLAB_LIMIT)
837 			nr_objs = SLAB_LIMIT;
838 	} else {
839 		/*
840 		 * Ignore padding for the initial guess. The padding
841 		 * is at most @align-1 bytes, and @buffer_size is at
842 		 * least @align. In the worst case, this result will
843 		 * be one greater than the number of objects that fit
844 		 * into the memory allocation when taking the padding
845 		 * into account.
846 		 */
847 		nr_objs = (slab_size - sizeof(struct slab)) /
848 			  (buffer_size + sizeof(kmem_bufctl_t));
849 
850 		/*
851 		 * This calculated number will be either the right
852 		 * amount, or one greater than what we want.
853 		 */
854 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
855 		       > slab_size)
856 			nr_objs--;
857 
858 		if (nr_objs > SLAB_LIMIT)
859 			nr_objs = SLAB_LIMIT;
860 
861 		mgmt_size = slab_mgmt_size(nr_objs, align);
862 	}
863 	*num = nr_objs;
864 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
865 }
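/*
 * Worked example of the on-slab case (illustrative numbers only): with a
 * 4096-byte slab, buffer_size = 256, sizeof(struct slab) = 32,
 * sizeof(kmem_bufctl_t) = 4 and align = 32, the initial guess is
 * (4096 - 32) / (256 + 4) = 15 objects; slab_mgmt_size(15, 32) = 96 and
 * 96 + 15 * 256 = 3936 <= 4096, so nr_objs stays 15 and
 * *left_over = 4096 - 3840 - 96 = 160 bytes, which feeds cache colouring.
 */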
866 
867 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
868 
869 static void __slab_error(const char *function, struct kmem_cache *cachep,
870 			char *msg)
871 {
872 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
873 	       function, cachep->name, msg);
874 	dump_stack();
875 }
876 
877 /*
878  * By default on NUMA we use alien caches to stage the freeing of
879  * objects allocated from other nodes. This causes massive memory
880  * inefficiencies when using fake NUMA setup to split memory into a
881  * large number of small nodes, so it can be disabled on the command
882  * line.
883  */
884 
885 static int use_alien_caches __read_mostly = 1;
886 static int __init noaliencache_setup(char *s)
887 {
888 	use_alien_caches = 0;
889 	return 1;
890 }
891 __setup("noaliencache", noaliencache_setup);
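/*
 * For example, booting with "noaliencache" on the kernel command line
 * (typically alongside a fake-NUMA layout) disables the alien caches for
 * the lifetime of the system.
 */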
892 
893 #ifdef CONFIG_NUMA
894 /*
895  * Special reaping functions for NUMA systems called from cache_reap().
896  * These take care of doing round robin flushing of alien caches (containing
897  * objects freed on different nodes from which they were allocated) and the
898  * flushing of remote pcps by calling drain_node_pages.
899  */
900 static DEFINE_PER_CPU(unsigned long, reap_node);
901 
902 static void init_reap_node(int cpu)
903 {
904 	int node;
905 
906 	node = next_node(cpu_to_node(cpu), node_online_map);
907 	if (node == MAX_NUMNODES)
908 		node = first_node(node_online_map);
909 
910 	per_cpu(reap_node, cpu) = node;
911 }
912 
913 static void next_reap_node(void)
914 {
915 	int node = __get_cpu_var(reap_node);
916 
917 	node = next_node(node, node_online_map);
918 	if (unlikely(node >= MAX_NUMNODES))
919 		node = first_node(node_online_map);
920 	__get_cpu_var(reap_node) = node;
921 }
922 
923 #else
924 #define init_reap_node(cpu) do { } while (0)
925 #define next_reap_node(void) do { } while (0)
926 #endif
927 
928 /*
929  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
930  * via the workqueue/eventd.
931  * Add the CPU number into the expiration time to minimize the possibility of
932  * the CPUs getting into lockstep and contending for the global cache chain
933  * lock.
934  */
935 static void __cpuinit start_cpu_timer(int cpu)
936 {
937 	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
938 
939 	/*
940 	 * When this gets called from do_initcalls via cpucache_init(),
941 	 * init_workqueues() has already run, so keventd will be setup
942 	 * at that time.
943 	 */
944 	if (keventd_up() && reap_work->work.func == NULL) {
945 		init_reap_node(cpu);
946 		INIT_DELAYED_WORK(reap_work, cache_reap);
947 		schedule_delayed_work_on(cpu, reap_work,
948 					__round_jiffies_relative(HZ, cpu));
949 	}
950 }
951 
952 static struct array_cache *alloc_arraycache(int node, int entries,
953 					    int batchcount)
954 {
955 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
956 	struct array_cache *nc = NULL;
957 
958 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
959 	if (nc) {
960 		nc->avail = 0;
961 		nc->limit = entries;
962 		nc->batchcount = batchcount;
963 		nc->touched = 0;
964 		spin_lock_init(&nc->lock);
965 	}
966 	return nc;
967 }
968 
969 /*
970  * Transfer objects in one arraycache to another.
971  * Locking must be handled by the caller.
972  *
973  * Return the number of entries transferred.
974  */
975 static int transfer_objects(struct array_cache *to,
976 		struct array_cache *from, unsigned int max)
977 {
978 	/* Figure out how many entries to transfer */
979 	int nr = min(min(from->avail, max), to->limit - to->avail);
980 
981 	if (!nr)
982 		return 0;
983 
984 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
985 			sizeof(void *) *nr);
986 
987 	from->avail -= nr;
988 	to->avail += nr;
989 	to->touched = 1;
990 	return nr;
991 }
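/*
 * Note that the memcpy above takes the last (most recently freed, hence most
 * likely cache-warm) @nr entries of @from, preserving the LIFO behaviour
 * described for struct array_cache.
 */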
992 
993 #ifndef CONFIG_NUMA
994 
995 #define drain_alien_cache(cachep, alien) do { } while (0)
996 #define reap_alien(cachep, l3) do { } while (0)
997 
998 static inline struct array_cache **alloc_alien_cache(int node, int limit)
999 {
1000 	return (struct array_cache **)BAD_ALIEN_MAGIC;
1001 }
1002 
1003 static inline void free_alien_cache(struct array_cache **ac_ptr)
1004 {
1005 }
1006 
1007 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1008 {
1009 	return 0;
1010 }
1011 
1012 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1013 		gfp_t flags)
1014 {
1015 	return NULL;
1016 }
1017 
1018 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1019 		 gfp_t flags, int nodeid)
1020 {
1021 	return NULL;
1022 }
1023 
1024 #else	/* CONFIG_NUMA */
1025 
1026 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1027 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1028 
1029 static struct array_cache **alloc_alien_cache(int node, int limit)
1030 {
1031 	struct array_cache **ac_ptr;
1032 	int memsize = sizeof(void *) * nr_node_ids;
1033 	int i;
1034 
1035 	if (limit > 1)
1036 		limit = 12;
1037 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1038 	if (ac_ptr) {
1039 		for_each_node(i) {
1040 			if (i == node || !node_online(i)) {
1041 				ac_ptr[i] = NULL;
1042 				continue;
1043 			}
1044 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1045 			if (!ac_ptr[i]) {
1046 				for (i--; i >= 0; i--)
1047 					kfree(ac_ptr[i]);
1048 				kfree(ac_ptr);
1049 				return NULL;
1050 			}
1051 		}
1052 	}
1053 	return ac_ptr;
1054 }
1055 
1056 static void free_alien_cache(struct array_cache **ac_ptr)
1057 {
1058 	int i;
1059 
1060 	if (!ac_ptr)
1061 		return;
1062 	for_each_node(i)
1063 	    kfree(ac_ptr[i]);
1064 	kfree(ac_ptr);
1065 }
1066 
1067 static void __drain_alien_cache(struct kmem_cache *cachep,
1068 				struct array_cache *ac, int node)
1069 {
1070 	struct kmem_list3 *rl3 = cachep->nodelists[node];
1071 
1072 	if (ac->avail) {
1073 		spin_lock(&rl3->list_lock);
1074 		/*
1075 		 * Stuff objects into the remote node's shared array first.
1076 		 * That way we could avoid the overhead of putting the objects
1077 		 * into the free lists and getting them back later.
1078 		 */
1079 		if (rl3->shared)
1080 			transfer_objects(rl3->shared, ac, ac->limit);
1081 
1082 		free_block(cachep, ac->entry, ac->avail, node);
1083 		ac->avail = 0;
1084 		spin_unlock(&rl3->list_lock);
1085 	}
1086 }
1087 
1088 /*
1089  * Called from cache_reap() to regularly drain alien caches round robin.
1090  */
1091 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1092 {
1093 	int node = __get_cpu_var(reap_node);
1094 
1095 	if (l3->alien) {
1096 		struct array_cache *ac = l3->alien[node];
1097 
1098 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1099 			__drain_alien_cache(cachep, ac, node);
1100 			spin_unlock_irq(&ac->lock);
1101 		}
1102 	}
1103 }
1104 
1105 static void drain_alien_cache(struct kmem_cache *cachep,
1106 				struct array_cache **alien)
1107 {
1108 	int i = 0;
1109 	struct array_cache *ac;
1110 	unsigned long flags;
1111 
1112 	for_each_online_node(i) {
1113 		ac = alien[i];
1114 		if (ac) {
1115 			spin_lock_irqsave(&ac->lock, flags);
1116 			__drain_alien_cache(cachep, ac, i);
1117 			spin_unlock_irqrestore(&ac->lock, flags);
1118 		}
1119 	}
1120 }
1121 
1122 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1123 {
1124 	struct slab *slabp = virt_to_slab(objp);
1125 	int nodeid = slabp->nodeid;
1126 	struct kmem_list3 *l3;
1127 	struct array_cache *alien = NULL;
1128 	int node;
1129 
1130 	node = numa_node_id();
1131 
1132 	/*
1133 	 * Make sure we are not freeing an object from another node to the array
1134 	 * cache on this cpu.
1135 	 */
1136 	if (likely(slabp->nodeid == node))
1137 		return 0;
1138 
1139 	l3 = cachep->nodelists[node];
1140 	STATS_INC_NODEFREES(cachep);
1141 	if (l3->alien && l3->alien[nodeid]) {
1142 		alien = l3->alien[nodeid];
1143 		spin_lock(&alien->lock);
1144 		if (unlikely(alien->avail == alien->limit)) {
1145 			STATS_INC_ACOVERFLOW(cachep);
1146 			__drain_alien_cache(cachep, alien, nodeid);
1147 		}
1148 		alien->entry[alien->avail++] = objp;
1149 		spin_unlock(&alien->lock);
1150 	} else {
1151 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1152 		free_block(cachep, &objp, 1, nodeid);
1153 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1154 	}
1155 	return 1;
1156 }
1157 #endif
1158 
1159 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1160 				    unsigned long action, void *hcpu)
1161 {
1162 	long cpu = (long)hcpu;
1163 	struct kmem_cache *cachep;
1164 	struct kmem_list3 *l3 = NULL;
1165 	int node = cpu_to_node(cpu);
1166 	const int memsize = sizeof(struct kmem_list3);
1167 
1168 	switch (action) {
1169 	case CPU_LOCK_ACQUIRE:
1170 		mutex_lock(&cache_chain_mutex);
1171 		break;
1172 	case CPU_UP_PREPARE:
1173 	case CPU_UP_PREPARE_FROZEN:
1174 		/*
1175 		 * We need to do this right in the beginning since
1176 		 * alloc_arraycache's are going to use this list.
1177 		 * kmalloc_node allows us to add the slab to the right
1178 		 * kmem_list3 and not this cpu's kmem_list3
1179 		 */
1180 
1181 		list_for_each_entry(cachep, &cache_chain, next) {
1182 			/*
1183 			 * Set up the size64 kmemlist for cpu before we can
1184 			 * begin anything. Make sure some other cpu on this
1185 			 * node has not already allocated this
1186 			 */
1187 			if (!cachep->nodelists[node]) {
1188 				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1189 				if (!l3)
1190 					goto bad;
1191 				kmem_list3_init(l3);
1192 				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1193 				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1194 
1195 				/*
1196 				 * The l3s don't come and go as CPUs come and
1197 				 * go.  cache_chain_mutex is sufficient
1198 				 * protection here.
1199 				 */
1200 				cachep->nodelists[node] = l3;
1201 			}
1202 
1203 			spin_lock_irq(&cachep->nodelists[node]->list_lock);
1204 			cachep->nodelists[node]->free_limit =
1205 				(1 + nr_cpus_node(node)) *
1206 				cachep->batchcount + cachep->num;
1207 			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1208 		}
1209 
1210 		/*
1211 		 * Now we can go ahead with allocating the shared arrays and
1212 		 * array caches
1213 		 */
1214 		list_for_each_entry(cachep, &cache_chain, next) {
1215 			struct array_cache *nc;
1216 			struct array_cache *shared = NULL;
1217 			struct array_cache **alien = NULL;
1218 
1219 			nc = alloc_arraycache(node, cachep->limit,
1220 						cachep->batchcount);
1221 			if (!nc)
1222 				goto bad;
1223 			if (cachep->shared) {
1224 				shared = alloc_arraycache(node,
1225 					cachep->shared * cachep->batchcount,
1226 					0xbaadf00d);
1227 				if (!shared)
1228 					goto bad;
1229 			}
1230 			if (use_alien_caches) {
1231                                 alien = alloc_alien_cache(node, cachep->limit);
1232                                 if (!alien)
1233                                         goto bad;
1234                         }
1235 			cachep->array[cpu] = nc;
1236 			l3 = cachep->nodelists[node];
1237 			BUG_ON(!l3);
1238 
1239 			spin_lock_irq(&l3->list_lock);
1240 			if (!l3->shared) {
1241 				/*
1242 				 * We are serialised from CPU_DEAD or
1243 				 * CPU_UP_CANCELLED by the cpucontrol lock
1244 				 */
1245 				l3->shared = shared;
1246 				shared = NULL;
1247 			}
1248 #ifdef CONFIG_NUMA
1249 			if (!l3->alien) {
1250 				l3->alien = alien;
1251 				alien = NULL;
1252 			}
1253 #endif
1254 			spin_unlock_irq(&l3->list_lock);
1255 			kfree(shared);
1256 			free_alien_cache(alien);
1257 		}
1258 		break;
1259 	case CPU_ONLINE:
1260 	case CPU_ONLINE_FROZEN:
1261 		start_cpu_timer(cpu);
1262 		break;
1263 #ifdef CONFIG_HOTPLUG_CPU
1264   	case CPU_DOWN_PREPARE:
1265   	case CPU_DOWN_PREPARE_FROZEN:
1266 		/*
1267 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1268 		 * held so that if cache_reap() is invoked it cannot do
1269 		 * anything expensive but will only modify reap_work
1270 		 * and reschedule the timer.
1271 		*/
1272 		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1273 		/* Now the cache_reaper is guaranteed to be not running. */
1274 		per_cpu(reap_work, cpu).work.func = NULL;
1275   		break;
1276   	case CPU_DOWN_FAILED:
1277   	case CPU_DOWN_FAILED_FROZEN:
1278 		start_cpu_timer(cpu);
1279   		break;
1280 	case CPU_DEAD:
1281 	case CPU_DEAD_FROZEN:
1282 		/*
1283 		 * Even if all the cpus of a node are down, we don't free the
1284 		 * kmem_list3 of any cache. This is to avoid a race between
1285 		 * cpu_down and a kmalloc allocation from another cpu for
1286 		 * memory from the node of the cpu going down.  The list3
1287 		 * structure is usually allocated from kmem_cache_create() and
1288 		 * gets destroyed at kmem_cache_destroy().
1289 		 */
1290 		/* fall thru */
1291 #endif
1292 	case CPU_UP_CANCELED:
1293 	case CPU_UP_CANCELED_FROZEN:
1294 		list_for_each_entry(cachep, &cache_chain, next) {
1295 			struct array_cache *nc;
1296 			struct array_cache *shared;
1297 			struct array_cache **alien;
1298 			cpumask_t mask;
1299 
1300 			mask = node_to_cpumask(node);
1301 			/* cpu is dead; no one can alloc from it. */
1302 			nc = cachep->array[cpu];
1303 			cachep->array[cpu] = NULL;
1304 			l3 = cachep->nodelists[node];
1305 
1306 			if (!l3)
1307 				goto free_array_cache;
1308 
1309 			spin_lock_irq(&l3->list_lock);
1310 
1311 			/* Free limit for this kmem_list3 */
1312 			l3->free_limit -= cachep->batchcount;
1313 			if (nc)
1314 				free_block(cachep, nc->entry, nc->avail, node);
1315 
1316 			if (!cpus_empty(mask)) {
1317 				spin_unlock_irq(&l3->list_lock);
1318 				goto free_array_cache;
1319 			}
1320 
1321 			shared = l3->shared;
1322 			if (shared) {
1323 				free_block(cachep, shared->entry,
1324 					   shared->avail, node);
1325 				l3->shared = NULL;
1326 			}
1327 
1328 			alien = l3->alien;
1329 			l3->alien = NULL;
1330 
1331 			spin_unlock_irq(&l3->list_lock);
1332 
1333 			kfree(shared);
1334 			if (alien) {
1335 				drain_alien_cache(cachep, alien);
1336 				free_alien_cache(alien);
1337 			}
1338 free_array_cache:
1339 			kfree(nc);
1340 		}
1341 		/*
1342 		 * In the previous loop, all the objects were freed to
1343 		 * the respective cache's slabs; now we can go ahead and
1344 		 * shrink each nodelist to its limit.
1345 		 */
1346 		list_for_each_entry(cachep, &cache_chain, next) {
1347 			l3 = cachep->nodelists[node];
1348 			if (!l3)
1349 				continue;
1350 			drain_freelist(cachep, l3, l3->free_objects);
1351 		}
1352 		break;
1353 	case CPU_LOCK_RELEASE:
1354 		mutex_unlock(&cache_chain_mutex);
1355 		break;
1356 	}
1357 	return NOTIFY_OK;
1358 bad:
1359 	return NOTIFY_BAD;
1360 }
1361 
1362 static struct notifier_block __cpuinitdata cpucache_notifier = {
1363 	&cpuup_callback, NULL, 0
1364 };
1365 
1366 /*
1367  * swap the static kmem_list3 with kmalloced memory
1368  */
1369 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1370 			int nodeid)
1371 {
1372 	struct kmem_list3 *ptr;
1373 
1374 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1375 	BUG_ON(!ptr);
1376 
1377 	local_irq_disable();
1378 	memcpy(ptr, list, sizeof(struct kmem_list3));
1379 	/*
1380 	 * Do not assume that spinlocks can be initialized via memcpy:
1381 	 */
1382 	spin_lock_init(&ptr->list_lock);
1383 
1384 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1385 	cachep->nodelists[nodeid] = ptr;
1386 	local_irq_enable();
1387 }
1388 
1389 /*
1390  * Initialisation.  Called after the page allocator has been initialised and
1391  * before smp_init().
1392  */
1393 void __init kmem_cache_init(void)
1394 {
1395 	size_t left_over;
1396 	struct cache_sizes *sizes;
1397 	struct cache_names *names;
1398 	int i;
1399 	int order;
1400 	int node;
1401 
1402 	if (num_possible_nodes() == 1)
1403 		use_alien_caches = 0;
1404 
1405 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1406 		kmem_list3_init(&initkmem_list3[i]);
1407 		if (i < MAX_NUMNODES)
1408 			cache_cache.nodelists[i] = NULL;
1409 	}
1410 
1411 	/*
1412 	 * Fragmentation resistance on low memory - only use bigger
1413 	 * page orders on machines with more than 32MB of memory.
1414 	 */
1415 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1416 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1417 
1418 	/* Bootstrap is tricky, because several objects are allocated
1419 	 * from caches that do not exist yet:
1420 	 * 1) initialize the cache_cache cache: it contains the struct
1421 	 *    kmem_cache structures of all caches, except cache_cache itself:
1422 	 *    cache_cache is statically allocated.
1423 	 *    Initially an __init data area is used for the head array and the
1424 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1425 	 *    array at the end of the bootstrap.
1426 	 * 2) Create the first kmalloc cache.
1427 	 *    The struct kmem_cache for the new cache is allocated normally.
1428 	 *    An __init data area is used for the head array.
1429 	 * 3) Create the remaining kmalloc caches, with minimally sized
1430 	 *    head arrays.
1431 	 * 4) Replace the __init data head arrays for cache_cache and the first
1432 	 *    kmalloc cache with kmalloc allocated arrays.
1433 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1434 	 *    the other caches with kmalloc allocated memory.
1435 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1436 	 */
1437 
1438 	node = numa_node_id();
1439 
1440 	/* 1) create the cache_cache */
1441 	INIT_LIST_HEAD(&cache_chain);
1442 	list_add(&cache_cache.next, &cache_chain);
1443 	cache_cache.colour_off = cache_line_size();
1444 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1445 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
1446 
1447 	/*
1448 	 * struct kmem_cache size depends on nr_node_ids, which
1449 	 * can be less than MAX_NUMNODES.
1450 	 */
1451 	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1452 				 nr_node_ids * sizeof(struct kmem_list3 *);
1453 #if DEBUG
1454 	cache_cache.obj_size = cache_cache.buffer_size;
1455 #endif
1456 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1457 					cache_line_size());
1458 	cache_cache.reciprocal_buffer_size =
1459 		reciprocal_value(cache_cache.buffer_size);
1460 
1461 	for (order = 0; order < MAX_ORDER; order++) {
1462 		cache_estimate(order, cache_cache.buffer_size,
1463 			cache_line_size(), 0, &left_over, &cache_cache.num);
1464 		if (cache_cache.num)
1465 			break;
1466 	}
1467 	BUG_ON(!cache_cache.num);
1468 	cache_cache.gfporder = order;
1469 	cache_cache.colour = left_over / cache_cache.colour_off;
1470 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1471 				      sizeof(struct slab), cache_line_size());
1472 
1473 	/* 2+3) create the kmalloc caches */
1474 	sizes = malloc_sizes;
1475 	names = cache_names;
1476 
1477 	/*
1478 	 * Initialize the caches that provide memory for the array cache and the
1479 	 * kmem_list3 structures first.  Without this, further allocations will
1480 	 * bug.
1481 	 */
1482 
1483 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1484 					sizes[INDEX_AC].cs_size,
1485 					ARCH_KMALLOC_MINALIGN,
1486 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1487 					NULL);
1488 
1489 	if (INDEX_AC != INDEX_L3) {
1490 		sizes[INDEX_L3].cs_cachep =
1491 			kmem_cache_create(names[INDEX_L3].name,
1492 				sizes[INDEX_L3].cs_size,
1493 				ARCH_KMALLOC_MINALIGN,
1494 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1495 				NULL);
1496 	}
1497 
1498 	slab_early_init = 0;
1499 
1500 	while (sizes->cs_size != ULONG_MAX) {
1501 		/*
1502 		 * For performance, all the general caches are L1 aligned.
1503 		 * This should be particularly beneficial on SMP boxes, as it
1504 		 * eliminates "false sharing".
1505 		 * Note: for systems short on memory, removing the alignment will
1506 		 * allow tighter packing of the smaller caches.
1507 		 */
1508 		if (!sizes->cs_cachep) {
1509 			sizes->cs_cachep = kmem_cache_create(names->name,
1510 					sizes->cs_size,
1511 					ARCH_KMALLOC_MINALIGN,
1512 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1513 					NULL);
1514 		}
1515 #ifdef CONFIG_ZONE_DMA
1516 		sizes->cs_dmacachep = kmem_cache_create(
1517 					names->name_dma,
1518 					sizes->cs_size,
1519 					ARCH_KMALLOC_MINALIGN,
1520 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1521 						SLAB_PANIC,
1522 					NULL);
1523 #endif
1524 		sizes++;
1525 		names++;
1526 	}
1527 	/* 4) Replace the bootstrap head arrays */
1528 	{
1529 		struct array_cache *ptr;
1530 
1531 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1532 
1533 		local_irq_disable();
1534 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1535 		memcpy(ptr, cpu_cache_get(&cache_cache),
1536 		       sizeof(struct arraycache_init));
1537 		/*
1538 		 * Do not assume that spinlocks can be initialized via memcpy:
1539 		 */
1540 		spin_lock_init(&ptr->lock);
1541 
1542 		cache_cache.array[smp_processor_id()] = ptr;
1543 		local_irq_enable();
1544 
1545 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1546 
1547 		local_irq_disable();
1548 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1549 		       != &initarray_generic.cache);
1550 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1551 		       sizeof(struct arraycache_init));
1552 		/*
1553 		 * Do not assume that spinlocks can be initialized via memcpy:
1554 		 */
1555 		spin_lock_init(&ptr->lock);
1556 
1557 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1558 		    ptr;
1559 		local_irq_enable();
1560 	}
1561 	/* 5) Replace the bootstrap kmem_list3's */
1562 	{
1563 		int nid;
1564 
1565 		/* Replace the static kmem_list3 structures for the boot cpu */
1566 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
1567 
1568 		for_each_online_node(nid) {
1569 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1570 				  &initkmem_list3[SIZE_AC + nid], nid);
1571 
1572 			if (INDEX_AC != INDEX_L3) {
1573 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1574 					  &initkmem_list3[SIZE_L3 + nid], nid);
1575 			}
1576 		}
1577 	}
1578 
1579 	/* 6) resize the head arrays to their final sizes */
1580 	{
1581 		struct kmem_cache *cachep;
1582 		mutex_lock(&cache_chain_mutex);
1583 		list_for_each_entry(cachep, &cache_chain, next)
1584 			if (enable_cpucache(cachep))
1585 				BUG();
1586 		mutex_unlock(&cache_chain_mutex);
1587 	}
1588 
1589 	/* Annotate slab for lockdep -- annotate the malloc caches */
1590 	init_lock_keys();
1591 
1592 
1593 	/* Done! */
1594 	g_cpucache_up = FULL;
1595 
1596 	/*
1597 	 * Register a cpu startup notifier callback that initializes
1598 	 * cpu_cache_get for all new cpus
1599 	 */
1600 	register_cpu_notifier(&cpucache_notifier);
1601 
1602 	/*
1603 	 * The reap timers are started later, with a module init call: That part
1604 	 * of the kernel is not yet operational.
1605 	 */
1606 }
1607 
1608 static int __init cpucache_init(void)
1609 {
1610 	int cpu;
1611 
1612 	/*
1613 	 * Register the timers that return unneeded pages to the page allocator
1614 	 */
1615 	for_each_online_cpu(cpu)
1616 		start_cpu_timer(cpu);
1617 	return 0;
1618 }
1619 __initcall(cpucache_init);
1620 
1621 /*
1622  * Interface to system's page allocator. No need to hold the cache-lock.
1623  *
1624  * If we requested dmaable memory, we will get it. Even if we
1625  * did not request dmaable memory, we might get it, but that
1626  * would be relatively rare and ignorable.
1627  */
1628 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1629 {
1630 	struct page *page;
1631 	int nr_pages;
1632 	int i;
1633 
1634 #ifndef CONFIG_MMU
1635 	/*
1636 	 * Nommu uses slabs for process anonymous memory allocations, and thus
1637 	 * requires __GFP_COMP to properly refcount higher order allocations
1638 	 */
1639 	flags |= __GFP_COMP;
1640 #endif
1641 
1642 	flags |= cachep->gfpflags;
1643 
1644 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1645 	if (!page)
1646 		return NULL;
1647 
1648 	nr_pages = (1 << cachep->gfporder);
1649 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1650 		add_zone_page_state(page_zone(page),
1651 			NR_SLAB_RECLAIMABLE, nr_pages);
1652 	else
1653 		add_zone_page_state(page_zone(page),
1654 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1655 	for (i = 0; i < nr_pages; i++)
1656 		__SetPageSlab(page + i);
1657 	return page_address(page);
1658 }
1659 
1660 /*
1661  * Interface to system's page release.
1662  */
1663 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1664 {
1665 	unsigned long i = (1 << cachep->gfporder);
1666 	struct page *page = virt_to_page(addr);
1667 	const unsigned long nr_freed = i;
1668 
1669 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1670 		sub_zone_page_state(page_zone(page),
1671 				NR_SLAB_RECLAIMABLE, nr_freed);
1672 	else
1673 		sub_zone_page_state(page_zone(page),
1674 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1675 	while (i--) {
1676 		BUG_ON(!PageSlab(page));
1677 		__ClearPageSlab(page);
1678 		page++;
1679 	}
1680 	if (current->reclaim_state)
1681 		current->reclaim_state->reclaimed_slab += nr_freed;
1682 	free_pages((unsigned long)addr, cachep->gfporder);
1683 }
1684 
1685 static void kmem_rcu_free(struct rcu_head *head)
1686 {
1687 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1688 	struct kmem_cache *cachep = slab_rcu->cachep;
1689 
1690 	kmem_freepages(cachep, slab_rcu->addr);
1691 	if (OFF_SLAB(cachep))
1692 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1693 }
1694 
1695 #if DEBUG
1696 
1697 #ifdef CONFIG_DEBUG_PAGEALLOC
1698 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1699 			    unsigned long caller)
1700 {
1701 	int size = obj_size(cachep);
1702 
1703 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1704 
1705 	if (size < 5 * sizeof(unsigned long))
1706 		return;
1707 
1708 	*addr++ = 0x12345678;
1709 	*addr++ = caller;
1710 	*addr++ = smp_processor_id();
1711 	size -= 3 * sizeof(unsigned long);
1712 	{
1713 		unsigned long *sptr = &caller;
1714 		unsigned long svalue;
1715 
1716 		while (!kstack_end(sptr)) {
1717 			svalue = *sptr++;
1718 			if (kernel_text_address(svalue)) {
1719 				*addr++ = svalue;
1720 				size -= sizeof(unsigned long);
1721 				if (size <= sizeof(unsigned long))
1722 					break;
1723 			}
1724 		}
1725 
1726 	}
1727 	*addr++ = 0x87654321;
1728 }
1729 #endif
1730 
1731 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1732 {
1733 	int size = obj_size(cachep);
1734 	addr = &((char *)addr)[obj_offset(cachep)];
1735 
1736 	memset(addr, val, size);
1737 	*(unsigned char *)(addr + size - 1) = POISON_END;
1738 }
1739 
1740 static void dump_line(char *data, int offset, int limit)
1741 {
1742 	int i;
1743 	unsigned char error = 0;
1744 	int bad_count = 0;
1745 
1746 	printk(KERN_ERR "%03x:", offset);
1747 	for (i = 0; i < limit; i++) {
1748 		if (data[offset + i] != POISON_FREE) {
1749 			error = data[offset + i];
1750 			bad_count++;
1751 		}
1752 		printk(" %02x", (unsigned char)data[offset + i]);
1753 	}
1754 	printk("\n");
1755 
1756 	if (bad_count == 1) {
1757 		error ^= POISON_FREE;
1758 		if (!(error & (error - 1))) {
1759 			printk(KERN_ERR "Single bit error detected. Probably "
1760 					"bad RAM.\n");
1761 #ifdef CONFIG_X86
1762 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1763 					"test tool.\n");
1764 #else
1765 			printk(KERN_ERR "Run a memory test tool.\n");
1766 #endif
1767 		}
1768 	}
1769 }
1770 #endif
1771 
1772 #if DEBUG
1773 
1774 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1775 {
1776 	int i, size;
1777 	char *realobj;
1778 
1779 	if (cachep->flags & SLAB_RED_ZONE) {
1780 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1781 			*dbg_redzone1(cachep, objp),
1782 			*dbg_redzone2(cachep, objp));
1783 	}
1784 
1785 	if (cachep->flags & SLAB_STORE_USER) {
1786 		printk(KERN_ERR "Last user: [<%p>]",
1787 			*dbg_userword(cachep, objp));
1788 		print_symbol("(%s)",
1789 				(unsigned long)*dbg_userword(cachep, objp));
1790 		printk("\n");
1791 	}
1792 	realobj = (char *)objp + obj_offset(cachep);
1793 	size = obj_size(cachep);
1794 	for (i = 0; i < size && lines; i += 16, lines--) {
1795 		int limit;
1796 		limit = 16;
1797 		if (i + limit > size)
1798 			limit = size - i;
1799 		dump_line(realobj, i, limit);
1800 	}
1801 }
1802 
1803 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1804 {
1805 	char *realobj;
1806 	int size, i;
1807 	int lines = 0;
1808 
1809 	realobj = (char *)objp + obj_offset(cachep);
1810 	size = obj_size(cachep);
1811 
1812 	for (i = 0; i < size; i++) {
1813 		char exp = POISON_FREE;
1814 		if (i == size - 1)
1815 			exp = POISON_END;
1816 		if (realobj[i] != exp) {
1817 			int limit;
1818 			/* Mismatch! */
1819 			/* Print header */
1820 			if (lines == 0) {
1821 				printk(KERN_ERR
1822 					"Slab corruption: %s start=%p, len=%d\n",
1823 					cachep->name, realobj, size);
1824 				print_objinfo(cachep, objp, 0);
1825 			}
1826 			/* Hexdump the affected line */
1827 			i = (i / 16) * 16;
1828 			limit = 16;
1829 			if (i + limit > size)
1830 				limit = size - i;
1831 			dump_line(realobj, i, limit);
1832 			i += 16;
1833 			lines++;
1834 			/* Limit to 5 lines */
1835 			if (lines > 5)
1836 				break;
1837 		}
1838 	}
1839 	if (lines != 0) {
1840 		/* Print some data about the neighboring objects, if they
1841 		 * exist:
1842 		 */
1843 		struct slab *slabp = virt_to_slab(objp);
1844 		unsigned int objnr;
1845 
1846 		objnr = obj_to_index(cachep, slabp, objp);
1847 		if (objnr) {
1848 			objp = index_to_obj(cachep, slabp, objnr - 1);
1849 			realobj = (char *)objp + obj_offset(cachep);
1850 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1851 			       realobj, size);
1852 			print_objinfo(cachep, objp, 2);
1853 		}
1854 		if (objnr + 1 < cachep->num) {
1855 			objp = index_to_obj(cachep, slabp, objnr + 1);
1856 			realobj = (char *)objp + obj_offset(cachep);
1857 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1858 			       realobj, size);
1859 			print_objinfo(cachep, objp, 2);
1860 		}
1861 	}
1862 }
1863 #endif
1864 
1865 #if DEBUG
1866 /**
1867  * slab_destroy_objs - destroy a slab and its objects
1868  * @cachep: cache pointer being destroyed
1869  * @slabp: slab pointer being destroyed
1870  *
1871  * Call the registered destructor for each object in a slab that is being
1872  * destroyed.
1873  */
1874 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1875 {
1876 	int i;
1877 	for (i = 0; i < cachep->num; i++) {
1878 		void *objp = index_to_obj(cachep, slabp, i);
1879 
1880 		if (cachep->flags & SLAB_POISON) {
1881 #ifdef CONFIG_DEBUG_PAGEALLOC
1882 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1883 					OFF_SLAB(cachep))
1884 				kernel_map_pages(virt_to_page(objp),
1885 					cachep->buffer_size / PAGE_SIZE, 1);
1886 			else
1887 				check_poison_obj(cachep, objp);
1888 #else
1889 			check_poison_obj(cachep, objp);
1890 #endif
1891 		}
1892 		if (cachep->flags & SLAB_RED_ZONE) {
1893 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1894 				slab_error(cachep, "start of a freed object "
1895 					   "was overwritten");
1896 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1897 				slab_error(cachep, "end of a freed object "
1898 					   "was overwritten");
1899 		}
1900 	}
1901 }
1902 #else
1903 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1904 {
1905 }
1906 #endif
1907 
1908 /**
1909  * slab_destroy - destroy and release all objects in a slab
1910  * @cachep: cache pointer being destroyed
1911  * @slabp: slab pointer being destroyed
1912  *
1913  * Destroy all the objs in a slab, and release the mem back to the system.
1914  * Before calling the slab must have been unlinked from the cache.  The
1915  * cache-lock is not held/needed.
1916  */
1917 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1918 {
1919 	void *addr = slabp->s_mem - slabp->colouroff;
1920 
1921 	slab_destroy_objs(cachep, slabp);
1922 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1923 		struct slab_rcu *slab_rcu;
1924 
1925 		slab_rcu = (struct slab_rcu *)slabp;
1926 		slab_rcu->cachep = cachep;
1927 		slab_rcu->addr = addr;
1928 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1929 	} else {
1930 		kmem_freepages(cachep, addr);
1931 		if (OFF_SLAB(cachep))
1932 			kmem_cache_free(cachep->slabp_cache, slabp);
1933 	}
1934 }
1935 
1936 /*
1937  * For setting up all the kmem_list3s for caches whose buffer_size is the
1938  * same as the size of kmem_list3.
1939  */
1940 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1941 {
1942 	int node;
1943 
1944 	for_each_online_node(node) {
1945 		cachep->nodelists[node] = &initkmem_list3[index + node];
1946 		cachep->nodelists[node]->next_reap = jiffies +
1947 		    REAPTIMEOUT_LIST3 +
1948 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1949 	}
1950 }
1951 
1952 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1953 {
1954 	int i;
1955 	struct kmem_list3 *l3;
1956 
1957 	for_each_online_cpu(i)
1958 	    kfree(cachep->array[i]);
1959 
1960 	/* NUMA: free the list3 structures */
1961 	for_each_online_node(i) {
1962 		l3 = cachep->nodelists[i];
1963 		if (l3) {
1964 			kfree(l3->shared);
1965 			free_alien_cache(l3->alien);
1966 			kfree(l3);
1967 		}
1968 	}
1969 	kmem_cache_free(&cache_cache, cachep);
1970 }
1971 
1972 
1973 /**
1974  * calculate_slab_order - calculate size (page order) of slabs
1975  * @cachep: pointer to the cache that is being created
1976  * @size: size of objects to be created in this cache.
1977  * @align: required alignment for the objects.
1978  * @flags: slab allocation flags
1979  *
1980  * Also calculates the number of objects per slab.
1981  *
1982  * This could be made much more intelligent.  For now, try to avoid using
1983  * high order pages for slabs.  When the gfp() functions are more friendly
1984  * towards high-order requests, this should be changed.
1985  */
1986 static size_t calculate_slab_order(struct kmem_cache *cachep,
1987 			size_t size, size_t align, unsigned long flags)
1988 {
1989 	unsigned long offslab_limit;
1990 	size_t left_over = 0;
1991 	int gfporder;
1992 
1993 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1994 		unsigned int num;
1995 		size_t remainder;
1996 
1997 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
1998 		if (!num)
1999 			continue;
2000 
2001 		if (flags & CFLGS_OFF_SLAB) {
2002 			/*
2003 			 * Max number of objs-per-slab for caches which
2004 			 * use off-slab slabs. Needed to avoid a possible
2005 			 * looping condition in cache_grow().
2006 			 */
2007 			offslab_limit = size - sizeof(struct slab);
2008 			offslab_limit /= sizeof(kmem_bufctl_t);
2009 
2010  			if (num > offslab_limit)
2011 				break;
2012 		}
2013 
2014 		/* Found something acceptable - save it away */
2015 		cachep->num = num;
2016 		cachep->gfporder = gfporder;
2017 		left_over = remainder;
2018 
2019 		/*
2020 		 * A VFS-reclaimable slab tends to have most allocations
2021 		 * as GFP_NOFS and we really don't want to have to be allocating
2022 		 * higher-order pages when we are unable to shrink dcache.
2023 		 */
2024 		if (flags & SLAB_RECLAIM_ACCOUNT)
2025 			break;
2026 
2027 		/*
2028 		 * A large number of objects is good, but very large slabs are
2029 		 * currently bad for the gfp()s.
2030 		 */
2031 		if (gfporder >= slab_break_gfp_order)
2032 			break;
2033 
2034 		/*
2035 		 * Acceptable internal fragmentation?
2036 		 */
2037 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2038 			break;
2039 	}
2040 	return left_over;
2041 }
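
/*
 * Worked example (illustrative numbers only, ignoring the slab-management
 * overhead that cache_estimate() accounts for): with 4096-byte pages and
 * 1500-byte objects, order 0 fits num = 2 with left_over = 1096, and
 * 1096 * 8 > 4096, so the loop tries order 1; there num = 5 and
 * left_over = 692, 692 * 8 <= 8192, so order 1 is accepted (subject to
 * the SLAB_RECLAIM_ACCOUNT and slab_break_gfp_order checks above).
 */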
2042 
2043 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
2044 {
2045 	if (g_cpucache_up == FULL)
2046 		return enable_cpucache(cachep);
2047 
2048 	if (g_cpucache_up == NONE) {
2049 		/*
2050 		 * Note: the first kmem_cache_create must create the cache
2051 		 * that's used by kmalloc(24), otherwise the creation of
2052 		 * further caches will BUG().
2053 		 */
2054 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2055 
2056 		/*
2057 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2058 		 * the first cache, then we need to set up all its list3s,
2059 		 * otherwise the creation of further caches will BUG().
2060 		 */
2061 		set_up_list3s(cachep, SIZE_AC);
2062 		if (INDEX_AC == INDEX_L3)
2063 			g_cpucache_up = PARTIAL_L3;
2064 		else
2065 			g_cpucache_up = PARTIAL_AC;
2066 	} else {
2067 		cachep->array[smp_processor_id()] =
2068 			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2069 
2070 		if (g_cpucache_up == PARTIAL_AC) {
2071 			set_up_list3s(cachep, SIZE_L3);
2072 			g_cpucache_up = PARTIAL_L3;
2073 		} else {
2074 			int node;
2075 			for_each_online_node(node) {
2076 				cachep->nodelists[node] =
2077 				    kmalloc_node(sizeof(struct kmem_list3),
2078 						GFP_KERNEL, node);
2079 				BUG_ON(!cachep->nodelists[node]);
2080 				kmem_list3_init(cachep->nodelists[node]);
2081 			}
2082 		}
2083 	}
2084 	cachep->nodelists[numa_node_id()]->next_reap =
2085 			jiffies + REAPTIMEOUT_LIST3 +
2086 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2087 
2088 	cpu_cache_get(cachep)->avail = 0;
2089 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2090 	cpu_cache_get(cachep)->batchcount = 1;
2091 	cpu_cache_get(cachep)->touched = 0;
2092 	cachep->batchcount = 1;
2093 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2094 	return 0;
2095 }
2096 
2097 /**
2098  * kmem_cache_create - Create a cache.
2099  * @name: A string which is used in /proc/slabinfo to identify this cache.
2100  * @size: The size of objects to be created in this cache.
2101  * @align: The required alignment for the objects.
2102  * @flags: SLAB flags
2103  * @ctor: A constructor for the objects.
2104  *
2105  * Returns a ptr to the cache on success, NULL on failure.
2106  * Cannot be called within a int, but can be interrupted.
2107  * The @ctor is run when new pages are allocated by the cache.
2108  *
2109  * @name must be valid until the cache is destroyed. This implies that
2110  * the module calling this has to destroy the cache before getting unloaded.
2111  *
2112  * The flags are
2113  *
2114  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2115  * to catch references to uninitialised memory.
2116  *
2117  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2118  * for buffer overruns.
2119  *
2120  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2121  * cacheline.  This can be beneficial if you're counting cycles as closely
2122  * as davem.
2123  */
2124 struct kmem_cache *
2125 kmem_cache_create (const char *name, size_t size, size_t align,
2126 	unsigned long flags,
2127 	void (*ctor)(void*, struct kmem_cache *, unsigned long))
2128 {
2129 	size_t left_over, slab_size, ralign;
2130 	struct kmem_cache *cachep = NULL, *pc;
2131 
2132 	/*
2133 	 * Sanity checks... these are all serious usage bugs.
2134 	 */
2135 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2136 	    size > KMALLOC_MAX_SIZE) {
2137 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2138 				name);
2139 		BUG();
2140 	}
2141 
2142 	/*
2143 	 * We use cache_chain_mutex to ensure a consistent view of
2144 	 * cpu_online_map as well.  Please see cpuup_callback
2145 	 */
2146 	mutex_lock(&cache_chain_mutex);
2147 
2148 	list_for_each_entry(pc, &cache_chain, next) {
2149 		char tmp;
2150 		int res;
2151 
2152 		/*
2153 		 * This happens when the module gets unloaded and doesn't
2154 		 * destroy its slab cache and no one else reuses the vmalloc
2155 		 * area of the module.  Print a warning.
2156 		 */
2157 		res = probe_kernel_address(pc->name, tmp);
2158 		if (res) {
2159 			printk(KERN_ERR
2160 			       "SLAB: cache with size %d has lost its name\n",
2161 			       pc->buffer_size);
2162 			continue;
2163 		}
2164 
2165 		if (!strcmp(pc->name, name)) {
2166 			printk(KERN_ERR
2167 			       "kmem_cache_create: duplicate cache %s\n", name);
2168 			dump_stack();
2169 			goto oops;
2170 		}
2171 	}
2172 
2173 #if DEBUG
2174 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2175 #if FORCED_DEBUG
2176 	/*
2177 	 * Enable redzoning and last user accounting, except for caches with
2178 	 * large objects, if the increased size would increase the object size
2179 	 * above the next power of two: caches with object sizes just above a
2180 	 * power of two have a significant amount of internal fragmentation.
2181 	 */
2182 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2183 						2 * sizeof(unsigned long long)))
2184 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2185 	if (!(flags & SLAB_DESTROY_BY_RCU))
2186 		flags |= SLAB_POISON;
2187 #endif
2188 	if (flags & SLAB_DESTROY_BY_RCU)
2189 		BUG_ON(flags & SLAB_POISON);
2190 #endif
2191 	/*
2192 	 * Always check the flags; a caller might be expecting debug support
2193 	 * which isn't available.
2194 	 */
2195 	BUG_ON(flags & ~CREATE_MASK);
2196 
2197 	/*
2198 	 * Check that size is in terms of words.  This is needed to avoid
2199 	 * unaligned accesses for some archs when redzoning is used, and makes
2200 	 * sure any on-slab bufctl's are also correctly aligned.
2201 	 */
2202 	if (size & (BYTES_PER_WORD - 1)) {
2203 		size += (BYTES_PER_WORD - 1);
2204 		size &= ~(BYTES_PER_WORD - 1);
2205 	}
2206 
2207 	/* calculate the final buffer alignment: */
2208 
2209 	/* 1) arch recommendation: can be overridden for debug */
2210 	if (flags & SLAB_HWCACHE_ALIGN) {
2211 		/*
2212 		 * Default alignment: as specified by the arch code.  Except if
2213 		 * an object is really small, then squeeze multiple objects into
2214 		 * one cacheline.
2215 		 */
2216 		ralign = cache_line_size();
2217 		while (size <= ralign / 2)
2218 			ralign /= 2;
2219 	} else {
2220 		ralign = BYTES_PER_WORD;
2221 	}
2222 
2223 	/*
2224 	 * Redzoning and user store require word alignment or possibly larger.
2225 	 * Note this will be overridden by architecture or caller mandated
2226 	 * alignment if either is greater than BYTES_PER_WORD.
2227 	 */
2228 	if (flags & SLAB_STORE_USER)
2229 		ralign = BYTES_PER_WORD;
2230 
2231 	if (flags & SLAB_RED_ZONE) {
2232 		ralign = REDZONE_ALIGN;
2233 		/* If redzoning, ensure that the second redzone is suitably
2234 		 * aligned, by adjusting the object size accordingly. */
2235 		size += REDZONE_ALIGN - 1;
2236 		size &= ~(REDZONE_ALIGN - 1);
2237 	}
2238 
2239 	/* 2) arch mandated alignment */
2240 	if (ralign < ARCH_SLAB_MINALIGN) {
2241 		ralign = ARCH_SLAB_MINALIGN;
2242 	}
2243 	/* 3) caller mandated alignment */
2244 	if (ralign < align) {
2245 		ralign = align;
2246 	}
2247 	/* disable debug if necessary */
2248 	if (ralign > __alignof__(unsigned long long))
2249 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2250 	/*
2251 	 * 4) Store it.
2252 	 */
2253 	align = ralign;
2254 
2255 	/* Get cache's description obj. */
2256 	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2257 	if (!cachep)
2258 		goto oops;
2259 
2260 #if DEBUG
2261 	cachep->obj_size = size;
2262 
2263 	/*
2264 	 * Both debugging options require word-alignment which is calculated
2265 	 * into align above.
2266 	 */
2267 	if (flags & SLAB_RED_ZONE) {
2268 		/* add space for red zone words */
2269 		cachep->obj_offset += sizeof(unsigned long long);
2270 		size += 2 * sizeof(unsigned long long);
2271 	}
2272 	if (flags & SLAB_STORE_USER) {
2273 		/* user store requires one word of storage behind the end of
2274 		 * the real object. But if the second red zone needs to be
2275 		 * aligned to 64 bits, we must allow that much space.
2276 		 */
2277 		if (flags & SLAB_RED_ZONE)
2278 			size += REDZONE_ALIGN;
2279 		else
2280 			size += BYTES_PER_WORD;
2281 	}
2282 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2283 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2284 	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2285 		cachep->obj_offset += PAGE_SIZE - size;
2286 		size = PAGE_SIZE;
2287 	}
2288 #endif
2289 #endif
2290 
2291 	/*
2292 	 * Determine if the slab management is 'on' or 'off' slab.
2293 	 * (bootstrapping cannot cope with offslab caches so don't do
2294 	 * it too early on.)
2295 	 */
2296 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
2297 		/*
2298 		 * Size is large, assume best to place the slab management obj
2299 		 * off-slab (should allow better packing of objs).
2300 		 */
2301 		flags |= CFLGS_OFF_SLAB;
2302 
2303 	size = ALIGN(size, align);
2304 
2305 	left_over = calculate_slab_order(cachep, size, align, flags);
2306 
2307 	if (!cachep->num) {
2308 		printk(KERN_ERR
2309 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2310 		kmem_cache_free(&cache_cache, cachep);
2311 		cachep = NULL;
2312 		goto oops;
2313 	}
2314 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2315 			  + sizeof(struct slab), align);
2316 
2317 	/*
2318 	 * If the slab has been placed off-slab, and we have enough space then
2319 	 * move it on-slab. This is at the expense of any extra colouring.
2320 	 */
2321 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2322 		flags &= ~CFLGS_OFF_SLAB;
2323 		left_over -= slab_size;
2324 	}
2325 
2326 	if (flags & CFLGS_OFF_SLAB) {
2327 		/* really off slab. No need for manual alignment */
2328 		slab_size =
2329 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2330 	}
2331 
2332 	cachep->colour_off = cache_line_size();
2333 	/* Offset must be a multiple of the alignment. */
2334 	if (cachep->colour_off < align)
2335 		cachep->colour_off = align;
2336 	cachep->colour = left_over / cachep->colour_off;
2337 	cachep->slab_size = slab_size;
2338 	cachep->flags = flags;
2339 	cachep->gfpflags = 0;
2340 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2341 		cachep->gfpflags |= GFP_DMA;
2342 	cachep->buffer_size = size;
2343 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2344 
2345 	if (flags & CFLGS_OFF_SLAB) {
2346 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2347 		/*
2348 		 * This is a possibility for one of the malloc_sizes caches.
2349 		 * But since we go off slab only for object size greater than
2350 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2351 		 * this should not happen at all.
2352 		 * But leave a BUG_ON for some lucky dude.
2353 		 */
2354 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2355 	}
2356 	cachep->ctor = ctor;
2357 	cachep->name = name;
2358 
2359 	if (setup_cpu_cache(cachep)) {
2360 		__kmem_cache_destroy(cachep);
2361 		cachep = NULL;
2362 		goto oops;
2363 	}
2364 
2365 	/* cache setup completed, link it into the list */
2366 	list_add(&cachep->next, &cache_chain);
2367 oops:
2368 	if (!cachep && (flags & SLAB_PANIC))
2369 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2370 		      name);
2371 	mutex_unlock(&cache_chain_mutex);
2372 	return cachep;
2373 }
2374 EXPORT_SYMBOL(kmem_cache_create);
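
/*
 * Illustrative use of kmem_cache_create() (a sketch; "struct foo", foo_ctor
 * and foo_cache are hypothetical names, not defined in this file):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */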
2375 
2376 #if DEBUG
2377 static void check_irq_off(void)
2378 {
2379 	BUG_ON(!irqs_disabled());
2380 }
2381 
2382 static void check_irq_on(void)
2383 {
2384 	BUG_ON(irqs_disabled());
2385 }
2386 
2387 static void check_spinlock_acquired(struct kmem_cache *cachep)
2388 {
2389 #ifdef CONFIG_SMP
2390 	check_irq_off();
2391 	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2392 #endif
2393 }
2394 
2395 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2396 {
2397 #ifdef CONFIG_SMP
2398 	check_irq_off();
2399 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2400 #endif
2401 }
2402 
2403 #else
2404 #define check_irq_off()	do { } while(0)
2405 #define check_irq_on()	do { } while(0)
2406 #define check_spinlock_acquired(x) do { } while(0)
2407 #define check_spinlock_acquired_node(x, y) do { } while(0)
2408 #endif
2409 
2410 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2411 			struct array_cache *ac,
2412 			int force, int node);
2413 
2414 static void do_drain(void *arg)
2415 {
2416 	struct kmem_cache *cachep = arg;
2417 	struct array_cache *ac;
2418 	int node = numa_node_id();
2419 
2420 	check_irq_off();
2421 	ac = cpu_cache_get(cachep);
2422 	spin_lock(&cachep->nodelists[node]->list_lock);
2423 	free_block(cachep, ac->entry, ac->avail, node);
2424 	spin_unlock(&cachep->nodelists[node]->list_lock);
2425 	ac->avail = 0;
2426 }
2427 
2428 static void drain_cpu_caches(struct kmem_cache *cachep)
2429 {
2430 	struct kmem_list3 *l3;
2431 	int node;
2432 
2433 	on_each_cpu(do_drain, cachep, 1, 1);
2434 	check_irq_on();
2435 	for_each_online_node(node) {
2436 		l3 = cachep->nodelists[node];
2437 		if (l3 && l3->alien)
2438 			drain_alien_cache(cachep, l3->alien);
2439 	}
2440 
2441 	for_each_online_node(node) {
2442 		l3 = cachep->nodelists[node];
2443 		if (l3)
2444 			drain_array(cachep, l3, l3->shared, 1, node);
2445 	}
2446 }
2447 
2448 /*
2449  * Remove slabs from the list of free slabs.
2450  * Specify the number of slabs to drain in tofree.
2451  *
2452  * Returns the actual number of slabs released.
2453  */
2454 static int drain_freelist(struct kmem_cache *cache,
2455 			struct kmem_list3 *l3, int tofree)
2456 {
2457 	struct list_head *p;
2458 	int nr_freed;
2459 	struct slab *slabp;
2460 
2461 	nr_freed = 0;
2462 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2463 
2464 		spin_lock_irq(&l3->list_lock);
2465 		p = l3->slabs_free.prev;
2466 		if (p == &l3->slabs_free) {
2467 			spin_unlock_irq(&l3->list_lock);
2468 			goto out;
2469 		}
2470 
2471 		slabp = list_entry(p, struct slab, list);
2472 #if DEBUG
2473 		BUG_ON(slabp->inuse);
2474 #endif
2475 		list_del(&slabp->list);
2476 		/*
2477 		 * Safe to drop the lock. The slab is no longer linked
2478 		 * to the cache.
2479 		 */
2480 		l3->free_objects -= cache->num;
2481 		spin_unlock_irq(&l3->list_lock);
2482 		slab_destroy(cache, slabp);
2483 		nr_freed++;
2484 	}
2485 out:
2486 	return nr_freed;
2487 }
2488 
2489 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2490 static int __cache_shrink(struct kmem_cache *cachep)
2491 {
2492 	int ret = 0, i = 0;
2493 	struct kmem_list3 *l3;
2494 
2495 	drain_cpu_caches(cachep);
2496 
2497 	check_irq_on();
2498 	for_each_online_node(i) {
2499 		l3 = cachep->nodelists[i];
2500 		if (!l3)
2501 			continue;
2502 
2503 		drain_freelist(cachep, l3, l3->free_objects);
2504 
2505 		ret += !list_empty(&l3->slabs_full) ||
2506 			!list_empty(&l3->slabs_partial);
2507 	}
2508 	return (ret ? 1 : 0);
2509 }
2510 
2511 /**
2512  * kmem_cache_shrink - Shrink a cache.
2513  * @cachep: The cache to shrink.
2514  *
2515  * Releases as many slabs as possible for a cache.
2516  * To help debugging, a zero exit status indicates all slabs were released.
2517  */
2518 int kmem_cache_shrink(struct kmem_cache *cachep)
2519 {
2520 	int ret;
2521 	BUG_ON(!cachep || in_interrupt());
2522 
2523 	mutex_lock(&cache_chain_mutex);
2524 	ret = __cache_shrink(cachep);
2525 	mutex_unlock(&cache_chain_mutex);
2526 	return ret;
2527 }
2528 EXPORT_SYMBOL(kmem_cache_shrink);
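
/*
 * Illustrative use (sketch, hypothetical foo_cache): callers wanting to
 * give memory back under pressure can release the cache's empty slabs; a
 * non-zero return simply means some slabs were still in use:
 *
 *	if (kmem_cache_shrink(foo_cache))
 *		printk(KERN_DEBUG "foo_cache still has active objects\n");
 */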
2529 
2530 /**
2531  * kmem_cache_destroy - delete a cache
2532  * @cachep: the cache to destroy
2533  *
2534  * Remove a &struct kmem_cache object from the slab cache.
2535  *
2536  * It is expected this function will be called by a module when it is
2537  * unloaded.  This will remove the cache completely, and avoid a duplicate
2538  * cache being allocated each time a module is loaded and unloaded, if the
2539  * module doesn't have persistent in-kernel storage across loads and unloads.
2540  *
2541  * The cache must be empty before calling this function.
2542  *
2543  * The caller must guarantee that no one will allocate memory from the cache
2544  * during the kmem_cache_destroy().
2545  */
2546 void kmem_cache_destroy(struct kmem_cache *cachep)
2547 {
2548 	BUG_ON(!cachep || in_interrupt());
2549 
2550 	/* Find the cache in the chain of caches. */
2551 	mutex_lock(&cache_chain_mutex);
2552 	/*
2553 	 * the chain is never empty, cache_cache is never destroyed
2554 	 */
2555 	list_del(&cachep->next);
2556 	if (__cache_shrink(cachep)) {
2557 		slab_error(cachep, "Can't free all objects");
2558 		list_add(&cachep->next, &cache_chain);
2559 		mutex_unlock(&cache_chain_mutex);
2560 		return;
2561 	}
2562 
2563 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2564 		synchronize_rcu();
2565 
2566 	__kmem_cache_destroy(cachep);
2567 	mutex_unlock(&cache_chain_mutex);
2568 }
2569 EXPORT_SYMBOL(kmem_cache_destroy);
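
/*
 * Illustrative teardown (sketch, hypothetical foo_cache as above): a module
 * frees every outstanding object first and then destroys the cache from its
 * exit path:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */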
2570 
2571 /*
2572  * Get the memory for a slab management obj.
2573  * For a slab cache whose slab descriptor is off-slab, the descriptors
2574  * always come from the malloc_sizes caches.  The slab descriptor cannot
2575  * come from the cache that is being created because, when we search for
2576  * an appropriate cache for these descriptors in kmem_cache_create(), we
2577  * search through the malloc_sizes array.  If we were creating a
2578  * malloc_sizes cache here, it would not be visible to
2579  * kmem_find_general_cachep() until the initialization is complete.
2580  * Hence slabp_cache cannot be the same as the cache being created.
2581  */
2582 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2583 				   int colour_off, gfp_t local_flags,
2584 				   int nodeid)
2585 {
2586 	struct slab *slabp;
2587 
2588 	if (OFF_SLAB(cachep)) {
2589 		/* Slab management obj is off-slab. */
2590 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2591 					      local_flags & ~GFP_THISNODE, nodeid);
2592 		if (!slabp)
2593 			return NULL;
2594 	} else {
2595 		slabp = objp + colour_off;
2596 		colour_off += cachep->slab_size;
2597 	}
2598 	slabp->inuse = 0;
2599 	slabp->colouroff = colour_off;
2600 	slabp->s_mem = objp + colour_off;
2601 	slabp->nodeid = nodeid;
2602 	return slabp;
2603 }
2604 
2605 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2606 {
2607 	return (kmem_bufctl_t *) (slabp + 1);
2608 }
2609 
2610 static void cache_init_objs(struct kmem_cache *cachep,
2611 			    struct slab *slabp)
2612 {
2613 	int i;
2614 
2615 	for (i = 0; i < cachep->num; i++) {
2616 		void *objp = index_to_obj(cachep, slabp, i);
2617 #if DEBUG
2618 		/* need to poison the objs? */
2619 		if (cachep->flags & SLAB_POISON)
2620 			poison_obj(cachep, objp, POISON_FREE);
2621 		if (cachep->flags & SLAB_STORE_USER)
2622 			*dbg_userword(cachep, objp) = NULL;
2623 
2624 		if (cachep->flags & SLAB_RED_ZONE) {
2625 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2626 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2627 		}
2628 		/*
2629 		 * Constructors are not allowed to allocate memory from the same
2630 		 * cache which they are a constructor for.  Otherwise, deadlock.
2631 		 * They must also be threaded.
2632 		 */
2633 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2634 			cachep->ctor(objp + obj_offset(cachep), cachep,
2635 				     0);
2636 
2637 		if (cachep->flags & SLAB_RED_ZONE) {
2638 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2639 				slab_error(cachep, "constructor overwrote the"
2640 					   " end of an object");
2641 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2642 				slab_error(cachep, "constructor overwrote the"
2643 					   " start of an object");
2644 		}
2645 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2646 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2647 			kernel_map_pages(virt_to_page(objp),
2648 					 cachep->buffer_size / PAGE_SIZE, 0);
2649 #else
2650 		if (cachep->ctor)
2651 			cachep->ctor(objp, cachep, 0);
2652 #endif
2653 		slab_bufctl(slabp)[i] = i + 1;
2654 	}
2655 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2656 	slabp->free = 0;
2657 }
2658 
2659 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2660 {
2661 	if (CONFIG_ZONE_DMA_FLAG) {
2662 		if (flags & GFP_DMA)
2663 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2664 		else
2665 			BUG_ON(cachep->gfpflags & GFP_DMA);
2666 	}
2667 }
2668 
2669 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2670 				int nodeid)
2671 {
2672 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2673 	kmem_bufctl_t next;
2674 
2675 	slabp->inuse++;
2676 	next = slab_bufctl(slabp)[slabp->free];
2677 #if DEBUG
2678 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2679 	WARN_ON(slabp->nodeid != nodeid);
2680 #endif
2681 	slabp->free = next;
2682 
2683 	return objp;
2684 }
2685 
2686 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2687 				void *objp, int nodeid)
2688 {
2689 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2690 
2691 #if DEBUG
2692 	/* Verify that the slab belongs to the intended node */
2693 	WARN_ON(slabp->nodeid != nodeid);
2694 
2695 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2696 		printk(KERN_ERR "slab: double free detected in cache "
2697 				"'%s', objp %p\n", cachep->name, objp);
2698 		BUG();
2699 	}
2700 #endif
2701 	slab_bufctl(slabp)[objnr] = slabp->free;
2702 	slabp->free = objnr;
2703 	slabp->inuse--;
2704 }
2705 
2706 /*
2707  * Map pages beginning at addr to the given cache and slab. This is required
2708  * for the slab allocator to be able to look up the cache and slab of a
2709  * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2710  */
2711 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2712 			   void *addr)
2713 {
2714 	int nr_pages;
2715 	struct page *page;
2716 
2717 	page = virt_to_page(addr);
2718 
2719 	nr_pages = 1;
2720 	if (likely(!PageCompound(page)))
2721 		nr_pages <<= cache->gfporder;
2722 
2723 	do {
2724 		page_set_cache(page, cache);
2725 		page_set_slab(page, slab);
2726 		page++;
2727 	} while (--nr_pages);
2728 }
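
/*
 * The reverse mapping set up above is what the free and debug paths rely
 * on; a sketch of the lookup using helpers defined earlier in this file:
 *
 *	struct page *page = virt_to_head_page(obj);
 *	struct kmem_cache *cachep = page_get_cache(page);
 *	struct slab *slabp = page_get_slab(page);
 */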
2729 
2730 /*
2731  * Grow (by 1) the number of slabs within a cache.  This is called by
2732  * kmem_cache_alloc() when there are no active objs left in a cache.
2733  */
2734 static int cache_grow(struct kmem_cache *cachep,
2735 		gfp_t flags, int nodeid, void *objp)
2736 {
2737 	struct slab *slabp;
2738 	size_t offset;
2739 	gfp_t local_flags;
2740 	struct kmem_list3 *l3;
2741 
2742 	/*
2743 	 * Be lazy and only check for valid flags here,  keeping it out of the
2744 	 * critical path in kmem_cache_alloc().
2745 	 */
2746 	BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
2747 
2748 	local_flags = (flags & GFP_LEVEL_MASK);
2749 	/* Take the l3 list lock to change the colour_next on this node */
2750 	check_irq_off();
2751 	l3 = cachep->nodelists[nodeid];
2752 	spin_lock(&l3->list_lock);
2753 
2754 	/* Get colour for the slab, and calculate the next value. */
2755 	offset = l3->colour_next;
2756 	l3->colour_next++;
2757 	if (l3->colour_next >= cachep->colour)
2758 		l3->colour_next = 0;
2759 	spin_unlock(&l3->list_lock);
2760 
2761 	offset *= cachep->colour_off;
2762 
2763 	if (local_flags & __GFP_WAIT)
2764 		local_irq_enable();
2765 
2766 	/*
2767 	 * The test for a missing atomic flag is performed here, rather than
2768 	 * the more obvious place, simply to reduce the critical path length
2769 	 * in kmem_cache_alloc(). If a caller is seriously misbehaving they
2770 	 * will eventually be caught here (where it matters).
2771 	 */
2772 	kmem_flagcheck(cachep, flags);
2773 
2774 	/*
2775 	 * Get mem for the objs.  Attempt to allocate a physical page from
2776 	 * 'nodeid'.
2777 	 */
2778 	if (!objp)
2779 		objp = kmem_getpages(cachep, local_flags, nodeid);
2780 	if (!objp)
2781 		goto failed;
2782 
2783 	/* Get slab management. */
2784 	slabp = alloc_slabmgmt(cachep, objp, offset,
2785 			local_flags & ~GFP_THISNODE, nodeid);
2786 	if (!slabp)
2787 		goto opps1;
2788 
2789 	slabp->nodeid = nodeid;
2790 	slab_map_pages(cachep, slabp, objp);
2791 
2792 	cache_init_objs(cachep, slabp);
2793 
2794 	if (local_flags & __GFP_WAIT)
2795 		local_irq_disable();
2796 	check_irq_off();
2797 	spin_lock(&l3->list_lock);
2798 
2799 	/* Make slab active. */
2800 	list_add_tail(&slabp->list, &(l3->slabs_free));
2801 	STATS_INC_GROWN(cachep);
2802 	l3->free_objects += cachep->num;
2803 	spin_unlock(&l3->list_lock);
2804 	return 1;
2805 opps1:
2806 	kmem_freepages(cachep, objp);
2807 failed:
2808 	if (local_flags & __GFP_WAIT)
2809 		local_irq_disable();
2810 	return 0;
2811 }
2812 
2813 #if DEBUG
2814 
2815 /*
2816  * Perform extra freeing checks:
2817  * - detect bad pointers.
2818  * - POISON/RED_ZONE checking
2819  */
2820 static void kfree_debugcheck(const void *objp)
2821 {
2822 	if (!virt_addr_valid(objp)) {
2823 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2824 		       (unsigned long)objp);
2825 		BUG();
2826 	}
2827 }
2828 
2829 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2830 {
2831 	unsigned long long redzone1, redzone2;
2832 
2833 	redzone1 = *dbg_redzone1(cache, obj);
2834 	redzone2 = *dbg_redzone2(cache, obj);
2835 
2836 	/*
2837 	 * Redzone is ok.
2838 	 */
2839 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2840 		return;
2841 
2842 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2843 		slab_error(cache, "double free detected");
2844 	else
2845 		slab_error(cache, "memory outside object was overwritten");
2846 
2847 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2848 			obj, redzone1, redzone2);
2849 }
2850 
2851 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2852 				   void *caller)
2853 {
2854 	struct page *page;
2855 	unsigned int objnr;
2856 	struct slab *slabp;
2857 
2858 	objp -= obj_offset(cachep);
2859 	kfree_debugcheck(objp);
2860 	page = virt_to_head_page(objp);
2861 
2862 	slabp = page_get_slab(page);
2863 
2864 	if (cachep->flags & SLAB_RED_ZONE) {
2865 		verify_redzone_free(cachep, objp);
2866 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2867 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2868 	}
2869 	if (cachep->flags & SLAB_STORE_USER)
2870 		*dbg_userword(cachep, objp) = caller;
2871 
2872 	objnr = obj_to_index(cachep, slabp, objp);
2873 
2874 	BUG_ON(objnr >= cachep->num);
2875 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2876 
2877 #ifdef CONFIG_DEBUG_SLAB_LEAK
2878 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2879 #endif
2880 	if (cachep->flags & SLAB_POISON) {
2881 #ifdef CONFIG_DEBUG_PAGEALLOC
2882 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2883 			store_stackinfo(cachep, objp, (unsigned long)caller);
2884 			kernel_map_pages(virt_to_page(objp),
2885 					 cachep->buffer_size / PAGE_SIZE, 0);
2886 		} else {
2887 			poison_obj(cachep, objp, POISON_FREE);
2888 		}
2889 #else
2890 		poison_obj(cachep, objp, POISON_FREE);
2891 #endif
2892 	}
2893 	return objp;
2894 }
2895 
2896 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2897 {
2898 	kmem_bufctl_t i;
2899 	int entries = 0;
2900 
2901 	/* Check slab's freelist to see if this obj is there. */
2902 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2903 		entries++;
2904 		if (entries > cachep->num || i >= cachep->num)
2905 			goto bad;
2906 	}
2907 	if (entries != cachep->num - slabp->inuse) {
2908 bad:
2909 		printk(KERN_ERR "slab: Internal list corruption detected in "
2910 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2911 			cachep->name, cachep->num, slabp, slabp->inuse);
2912 		for (i = 0;
2913 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2914 		     i++) {
2915 			if (i % 16 == 0)
2916 				printk("\n%03x:", i);
2917 			printk(" %02x", ((unsigned char *)slabp)[i]);
2918 		}
2919 		printk("\n");
2920 		BUG();
2921 	}
2922 }
2923 #else
2924 #define kfree_debugcheck(x) do { } while(0)
2925 #define cache_free_debugcheck(x,objp,z) (objp)
2926 #define check_slabp(x,y) do { } while(0)
2927 #endif
2928 
2929 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2930 {
2931 	int batchcount;
2932 	struct kmem_list3 *l3;
2933 	struct array_cache *ac;
2934 	int node;
2935 
2936 	node = numa_node_id();
2937 
2938 	check_irq_off();
2939 	ac = cpu_cache_get(cachep);
2940 retry:
2941 	batchcount = ac->batchcount;
2942 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2943 		/*
2944 		 * If there was little recent activity on this cache, then
2945 		 * perform only a partial refill.  Otherwise we could generate
2946 		 * refill bouncing.
2947 		 */
2948 		batchcount = BATCHREFILL_LIMIT;
2949 	}
2950 	l3 = cachep->nodelists[node];
2951 
2952 	BUG_ON(ac->avail > 0 || !l3);
2953 	spin_lock(&l3->list_lock);
2954 
2955 	/* See if we can refill from the shared array */
2956 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2957 		goto alloc_done;
2958 
2959 	while (batchcount > 0) {
2960 		struct list_head *entry;
2961 		struct slab *slabp;
2962 		/* Get the slab the allocation is to come from. */
2963 		entry = l3->slabs_partial.next;
2964 		if (entry == &l3->slabs_partial) {
2965 			l3->free_touched = 1;
2966 			entry = l3->slabs_free.next;
2967 			if (entry == &l3->slabs_free)
2968 				goto must_grow;
2969 		}
2970 
2971 		slabp = list_entry(entry, struct slab, list);
2972 		check_slabp(cachep, slabp);
2973 		check_spinlock_acquired(cachep);
2974 
2975 		/*
2976 		 * The slab was either on partial or free list so
2977 		 * there must be at least one object available for
2978 		 * allocation.
2979 		 */
2980 		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
2981 
2982 		while (slabp->inuse < cachep->num && batchcount--) {
2983 			STATS_INC_ALLOCED(cachep);
2984 			STATS_INC_ACTIVE(cachep);
2985 			STATS_SET_HIGH(cachep);
2986 
2987 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2988 							    node);
2989 		}
2990 		check_slabp(cachep, slabp);
2991 
2992 		/* move slabp to correct slabp list: */
2993 		list_del(&slabp->list);
2994 		if (slabp->free == BUFCTL_END)
2995 			list_add(&slabp->list, &l3->slabs_full);
2996 		else
2997 			list_add(&slabp->list, &l3->slabs_partial);
2998 	}
2999 
3000 must_grow:
3001 	l3->free_objects -= ac->avail;
3002 alloc_done:
3003 	spin_unlock(&l3->list_lock);
3004 
3005 	if (unlikely(!ac->avail)) {
3006 		int x;
3007 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3008 
3009 		/* cache_grow can reenable interrupts, so ac could change. */
3010 		ac = cpu_cache_get(cachep);
3011 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3012 			return NULL;
3013 
3014 		if (!ac->avail)		/* objects refilled by interrupt? */
3015 			goto retry;
3016 	}
3017 	ac->touched = 1;
3018 	return ac->entry[--ac->avail];
3019 }
3020 
3021 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3022 						gfp_t flags)
3023 {
3024 	might_sleep_if(flags & __GFP_WAIT);
3025 #if DEBUG
3026 	kmem_flagcheck(cachep, flags);
3027 #endif
3028 }
3029 
3030 #if DEBUG
3031 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3032 				gfp_t flags, void *objp, void *caller)
3033 {
3034 	if (!objp)
3035 		return objp;
3036 	if (cachep->flags & SLAB_POISON) {
3037 #ifdef CONFIG_DEBUG_PAGEALLOC
3038 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3039 			kernel_map_pages(virt_to_page(objp),
3040 					 cachep->buffer_size / PAGE_SIZE, 1);
3041 		else
3042 			check_poison_obj(cachep, objp);
3043 #else
3044 		check_poison_obj(cachep, objp);
3045 #endif
3046 		poison_obj(cachep, objp, POISON_INUSE);
3047 	}
3048 	if (cachep->flags & SLAB_STORE_USER)
3049 		*dbg_userword(cachep, objp) = caller;
3050 
3051 	if (cachep->flags & SLAB_RED_ZONE) {
3052 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3053 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3054 			slab_error(cachep, "double free, or memory outside"
3055 						" object was overwritten");
3056 			printk(KERN_ERR
3057 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3058 				objp, *dbg_redzone1(cachep, objp),
3059 				*dbg_redzone2(cachep, objp));
3060 		}
3061 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3062 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3063 	}
3064 #ifdef CONFIG_DEBUG_SLAB_LEAK
3065 	{
3066 		struct slab *slabp;
3067 		unsigned objnr;
3068 
3069 		slabp = page_get_slab(virt_to_head_page(objp));
3070 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3071 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3072 	}
3073 #endif
3074 	objp += obj_offset(cachep);
3075 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3076 		cachep->ctor(objp, cachep, 0);
3077 #if ARCH_SLAB_MINALIGN
3078 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3079 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3080 		       objp, ARCH_SLAB_MINALIGN);
3081 	}
3082 #endif
3083 	return objp;
3084 }
3085 #else
3086 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3087 #endif
3088 
3089 #ifdef CONFIG_FAILSLAB
3090 
3091 static struct failslab_attr {
3092 
3093 	struct fault_attr attr;
3094 
3095 	u32 ignore_gfp_wait;
3096 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3097 	struct dentry *ignore_gfp_wait_file;
3098 #endif
3099 
3100 } failslab = {
3101 	.attr = FAULT_ATTR_INITIALIZER,
3102 	.ignore_gfp_wait = 1,
3103 };
3104 
3105 static int __init setup_failslab(char *str)
3106 {
3107 	return setup_fault_attr(&failslab.attr, str);
3108 }
3109 __setup("failslab=", setup_failslab);
3110 
3111 static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3112 {
3113 	if (cachep == &cache_cache)
3114 		return 0;
3115 	if (flags & __GFP_NOFAIL)
3116 		return 0;
3117 	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
3118 		return 0;
3119 
3120 	return should_fail(&failslab.attr, obj_size(cachep));
3121 }
3122 
3123 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3124 
3125 static int __init failslab_debugfs(void)
3126 {
3127 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
3128 	struct dentry *dir;
3129 	int err;
3130 
3131 	err = init_fault_attr_dentries(&failslab.attr, "failslab");
3132 	if (err)
3133 		return err;
3134 	dir = failslab.attr.dentries.dir;
3135 
3136 	failslab.ignore_gfp_wait_file =
3137 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
3138 				      &failslab.ignore_gfp_wait);
3139 
3140 	if (!failslab.ignore_gfp_wait_file) {
3141 		err = -ENOMEM;
3142 		debugfs_remove(failslab.ignore_gfp_wait_file);
3143 		cleanup_fault_attr_dentries(&failslab.attr);
3144 	}
3145 
3146 	return err;
3147 }
3148 
3149 late_initcall(failslab_debugfs);
3150 
3151 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3152 
3153 #else /* CONFIG_FAILSLAB */
3154 
3155 static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3156 {
3157 	return 0;
3158 }
3159 
3160 #endif /* CONFIG_FAILSLAB */
3161 
3162 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3163 {
3164 	void *objp;
3165 	struct array_cache *ac;
3166 
3167 	check_irq_off();
3168 
3169 	ac = cpu_cache_get(cachep);
3170 	if (likely(ac->avail)) {
3171 		STATS_INC_ALLOCHIT(cachep);
3172 		ac->touched = 1;
3173 		objp = ac->entry[--ac->avail];
3174 	} else {
3175 		STATS_INC_ALLOCMISS(cachep);
3176 		objp = cache_alloc_refill(cachep, flags);
3177 	}
3178 	return objp;
3179 }
3180 
3181 #ifdef CONFIG_NUMA
3182 /*
3183  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3184  *
3185  * If we are in_interrupt, then process context, including cpusets and
3186  * mempolicy, may not apply and should not be used for allocation policy.
3187  */
3188 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3189 {
3190 	int nid_alloc, nid_here;
3191 
3192 	if (in_interrupt() || (flags & __GFP_THISNODE))
3193 		return NULL;
3194 	nid_alloc = nid_here = numa_node_id();
3195 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3196 		nid_alloc = cpuset_mem_spread_node();
3197 	else if (current->mempolicy)
3198 		nid_alloc = slab_node(current->mempolicy);
3199 	if (nid_alloc != nid_here)
3200 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3201 	return NULL;
3202 }
3203 
3204 /*
3205  * Fallback function if there was no memory available and no objects on a
3206  * certain node and fall back is permitted. First we scan all the
3207  * available nodelists for available objects. If that fails then we
3208  * perform an allocation without specifying a node. This allows the page
3209  * allocator to do its reclaim / fallback magic. We then insert the
3210  * slab into the proper nodelist and then allocate from it.
3211  */
3212 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3213 {
3214 	struct zonelist *zonelist;
3215 	gfp_t local_flags;
3216 	struct zone **z;
3217 	void *obj = NULL;
3218 	int nid;
3219 
3220 	if (flags & __GFP_THISNODE)
3221 		return NULL;
3222 
3223 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
3224 			->node_zonelists[gfp_zone(flags)];
3225 	local_flags = (flags & GFP_LEVEL_MASK);
3226 
3227 retry:
3228 	/*
3229 	 * Look through allowed nodes for objects available
3230 	 * from existing per node queues.
3231 	 */
3232 	for (z = zonelist->zones; *z && !obj; z++) {
3233 		nid = zone_to_nid(*z);
3234 
3235 		if (cpuset_zone_allowed_hardwall(*z, flags) &&
3236 			cache->nodelists[nid] &&
3237 			cache->nodelists[nid]->free_objects)
3238 				obj = ____cache_alloc_node(cache,
3239 					flags | GFP_THISNODE, nid);
3240 	}
3241 
3242 	if (!obj) {
3243 		/*
3244 		 * This allocation will be performed within the constraints
3245 		 * of the current cpuset / memory policy requirements.
3246 		 * We may trigger various forms of reclaim on the allowed
3247 		 * set and go into memory reserves if necessary.
3248 		 */
3249 		if (local_flags & __GFP_WAIT)
3250 			local_irq_enable();
3251 		kmem_flagcheck(cache, flags);
3252 		obj = kmem_getpages(cache, flags, -1);
3253 		if (local_flags & __GFP_WAIT)
3254 			local_irq_disable();
3255 		if (obj) {
3256 			/*
3257 			 * Insert into the appropriate per node queues
3258 			 */
3259 			nid = page_to_nid(virt_to_page(obj));
3260 			if (cache_grow(cache, flags, nid, obj)) {
3261 				obj = ____cache_alloc_node(cache,
3262 					flags | GFP_THISNODE, nid);
3263 				if (!obj)
3264 					/*
3265 					 * Another processor may allocate the
3266 					 * objects in the slab since we are
3267 					 * not holding any locks.
3268 					 */
3269 					goto retry;
3270 			} else {
3271 				/* cache_grow already freed obj */
3272 				obj = NULL;
3273 			}
3274 		}
3275 	}
3276 	return obj;
3277 }
3278 
3279 /*
3280  * An interface to enable slab creation on nodeid
3281  */
3282 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3283 				int nodeid)
3284 {
3285 	struct list_head *entry;
3286 	struct slab *slabp;
3287 	struct kmem_list3 *l3;
3288 	void *obj;
3289 	int x;
3290 
3291 	l3 = cachep->nodelists[nodeid];
3292 	BUG_ON(!l3);
3293 
3294 retry:
3295 	check_irq_off();
3296 	spin_lock(&l3->list_lock);
3297 	entry = l3->slabs_partial.next;
3298 	if (entry == &l3->slabs_partial) {
3299 		l3->free_touched = 1;
3300 		entry = l3->slabs_free.next;
3301 		if (entry == &l3->slabs_free)
3302 			goto must_grow;
3303 	}
3304 
3305 	slabp = list_entry(entry, struct slab, list);
3306 	check_spinlock_acquired_node(cachep, nodeid);
3307 	check_slabp(cachep, slabp);
3308 
3309 	STATS_INC_NODEALLOCS(cachep);
3310 	STATS_INC_ACTIVE(cachep);
3311 	STATS_SET_HIGH(cachep);
3312 
3313 	BUG_ON(slabp->inuse == cachep->num);
3314 
3315 	obj = slab_get_obj(cachep, slabp, nodeid);
3316 	check_slabp(cachep, slabp);
3317 	l3->free_objects--;
3318 	/* move slabp to correct slabp list: */
3319 	list_del(&slabp->list);
3320 
3321 	if (slabp->free == BUFCTL_END)
3322 		list_add(&slabp->list, &l3->slabs_full);
3323 	else
3324 		list_add(&slabp->list, &l3->slabs_partial);
3325 
3326 	spin_unlock(&l3->list_lock);
3327 	goto done;
3328 
3329 must_grow:
3330 	spin_unlock(&l3->list_lock);
3331 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3332 	if (x)
3333 		goto retry;
3334 
3335 	return fallback_alloc(cachep, flags);
3336 
3337 done:
3338 	return obj;
3339 }
3340 
3341 /**
3342  * kmem_cache_alloc_node - Allocate an object on the specified node
3343  * @cachep: The cache to allocate from.
3344  * @flags: See kmalloc().
3345  * @nodeid: node number of the target node.
3346  * @caller: return address of caller, used for debug information
3347  *
3348  * Identical to kmem_cache_alloc but it will allocate memory on the given
3349  * node, which can improve the performance for cpu bound structures.
3350  *
3351  * Fallback to other node is possible if __GFP_THISNODE is not set.
3352  */
3353 static __always_inline void *
3354 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3355 		   void *caller)
3356 {
3357 	unsigned long save_flags;
3358 	void *ptr;
3359 
3360 	if (should_failslab(cachep, flags))
3361 		return NULL;
3362 
3363 	cache_alloc_debugcheck_before(cachep, flags);
3364 	local_irq_save(save_flags);
3365 
3366 	if (unlikely(nodeid == -1))
3367 		nodeid = numa_node_id();
3368 
3369 	if (unlikely(!cachep->nodelists[nodeid])) {
3370 		/* Node not bootstrapped yet */
3371 		ptr = fallback_alloc(cachep, flags);
3372 		goto out;
3373 	}
3374 
3375 	if (nodeid == numa_node_id()) {
3376 		/*
3377 		 * Use the locally cached objects if possible.
3378 		 * However ____cache_alloc does not allow fallback
3379 		 * to other nodes. It may fail while we still have
3380 		 * objects on other nodes available.
3381 		 */
3382 		ptr = ____cache_alloc(cachep, flags);
3383 		if (ptr)
3384 			goto out;
3385 	}
3386 	/* ___cache_alloc_node can fall back to other nodes */
3387 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3388   out:
3389 	local_irq_restore(save_flags);
3390 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3391 
3392 	if (unlikely((flags & __GFP_ZERO) && ptr))
3393 		memset(ptr, 0, obj_size(cachep));
3394 
3395 	return ptr;
3396 }
3397 
3398 static __always_inline void *
3399 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3400 {
3401 	void *objp;
3402 
3403 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3404 		objp = alternate_node_alloc(cache, flags);
3405 		if (objp)
3406 			goto out;
3407 	}
3408 	objp = ____cache_alloc(cache, flags);
3409 
3410 	/*
3411 	 * We may just have run out of memory on the local node.
3412 	 * ____cache_alloc_node() knows how to locate memory on other nodes.
3413 	 */
3414  	if (!objp)
3415  		objp = ____cache_alloc_node(cache, flags, numa_node_id());
3416 
3417   out:
3418 	return objp;
3419 }
3420 #else
3421 
3422 static __always_inline void *
3423 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3424 {
3425 	return ____cache_alloc(cachep, flags);
3426 }
3427 
3428 #endif /* CONFIG_NUMA */
3429 
3430 static __always_inline void *
3431 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3432 {
3433 	unsigned long save_flags;
3434 	void *objp;
3435 
3436 	if (should_failslab(cachep, flags))
3437 		return NULL;
3438 
3439 	cache_alloc_debugcheck_before(cachep, flags);
3440 	local_irq_save(save_flags);
3441 	objp = __do_cache_alloc(cachep, flags);
3442 	local_irq_restore(save_flags);
3443 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3444 	prefetchw(objp);
3445 
3446 	if (unlikely((flags & __GFP_ZERO) && objp))
3447 		memset(objp, 0, obj_size(cachep));
3448 
3449 	return objp;
3450 }
3451 
3452 /*
3453  * Caller needs to acquire correct kmem_list's list_lock
3454  */
3455 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3456 		       int node)
3457 {
3458 	int i;
3459 	struct kmem_list3 *l3;
3460 
3461 	for (i = 0; i < nr_objects; i++) {
3462 		void *objp = objpp[i];
3463 		struct slab *slabp;
3464 
3465 		slabp = virt_to_slab(objp);
3466 		l3 = cachep->nodelists[node];
3467 		list_del(&slabp->list);
3468 		check_spinlock_acquired_node(cachep, node);
3469 		check_slabp(cachep, slabp);
3470 		slab_put_obj(cachep, slabp, objp, node);
3471 		STATS_DEC_ACTIVE(cachep);
3472 		l3->free_objects++;
3473 		check_slabp(cachep, slabp);
3474 
3475 		/* fixup slab chains */
3476 		if (slabp->inuse == 0) {
3477 			if (l3->free_objects > l3->free_limit) {
3478 				l3->free_objects -= cachep->num;
3479 				/* No need to drop any previously held
3480 				 * lock here, even if we have an off-slab slab
3481 				 * descriptor: it is guaranteed to come from
3482 				 * a different cache, see the comments before
3483 				 * alloc_slabmgmt.
3484 				 */
3485 				slab_destroy(cachep, slabp);
3486 			} else {
3487 				list_add(&slabp->list, &l3->slabs_free);
3488 			}
3489 		} else {
3490 			/* Unconditionally move a slab to the end of the
3491 			 * partial list on free - maximum time for the
3492 			 * other objects to be freed, too.
3493 			 */
3494 			list_add_tail(&slabp->list, &l3->slabs_partial);
3495 		}
3496 	}
3497 }
3498 
3499 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3500 {
3501 	int batchcount;
3502 	struct kmem_list3 *l3;
3503 	int node = numa_node_id();
3504 
3505 	batchcount = ac->batchcount;
3506 #if DEBUG
3507 	BUG_ON(!batchcount || batchcount > ac->avail);
3508 #endif
3509 	check_irq_off();
3510 	l3 = cachep->nodelists[node];
3511 	spin_lock(&l3->list_lock);
3512 	if (l3->shared) {
3513 		struct array_cache *shared_array = l3->shared;
3514 		int max = shared_array->limit - shared_array->avail;
3515 		if (max) {
3516 			if (batchcount > max)
3517 				batchcount = max;
3518 			memcpy(&(shared_array->entry[shared_array->avail]),
3519 			       ac->entry, sizeof(void *) * batchcount);
3520 			shared_array->avail += batchcount;
3521 			goto free_done;
3522 		}
3523 	}
3524 
3525 	free_block(cachep, ac->entry, batchcount, node);
3526 free_done:
3527 #if STATS
3528 	{
3529 		int i = 0;
3530 		struct list_head *p;
3531 
3532 		p = l3->slabs_free.next;
3533 		while (p != &(l3->slabs_free)) {
3534 			struct slab *slabp;
3535 
3536 			slabp = list_entry(p, struct slab, list);
3537 			BUG_ON(slabp->inuse);
3538 
3539 			i++;
3540 			p = p->next;
3541 		}
3542 		STATS_SET_FREEABLE(cachep, i);
3543 	}
3544 #endif
3545 	spin_unlock(&l3->list_lock);
3546 	ac->avail -= batchcount;
3547 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3548 }
3549 
3550 /*
3551  * Release an obj back to its cache. If the obj has a constructed state, it must
3552  * be in this state _before_ it is released.  Called with interrupts disabled.
3553  */
3554 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3555 {
3556 	struct array_cache *ac = cpu_cache_get(cachep);
3557 
3558 	check_irq_off();
3559 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3560 
3561 	if (cache_free_alien(cachep, objp))
3562 		return;
3563 
3564 	if (likely(ac->avail < ac->limit)) {
3565 		STATS_INC_FREEHIT(cachep);
3566 		ac->entry[ac->avail++] = objp;
3567 		return;
3568 	} else {
3569 		STATS_INC_FREEMISS(cachep);
3570 		cache_flusharray(cachep, ac);
3571 		ac->entry[ac->avail++] = objp;
3572 	}
3573 }
3574 
3575 /**
3576  * kmem_cache_alloc - Allocate an object
3577  * @cachep: The cache to allocate from.
3578  * @flags: See kmalloc().
3579  *
3580  * Allocate an object from this cache.  The flags are only relevant
3581  * if the cache has no available objects.
3582  */
3583 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3584 {
3585 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3586 }
3587 EXPORT_SYMBOL(kmem_cache_alloc);
3588 
3589 /**
3590  * kmem_ptr_validate - check if an untrusted pointer might
3591  *	be a slab entry.
3592  * @cachep: the cache we're checking against
3593  * @ptr: pointer to validate
3594  *
3595  * This verifies that the untrusted pointer looks sane:
3596  * it is _not_ a guarantee that the pointer is actually
3597  * part of the slab cache in question, but it at least
3598  * validates that the pointer can be dereferenced and
3599  * looks half-way sane.
3600  *
3601  * Currently only used for dentry validation.
3602  */
3603 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3604 {
3605 	unsigned long addr = (unsigned long)ptr;
3606 	unsigned long min_addr = PAGE_OFFSET;
3607 	unsigned long align_mask = BYTES_PER_WORD - 1;
3608 	unsigned long size = cachep->buffer_size;
3609 	struct page *page;
3610 
3611 	if (unlikely(addr < min_addr))
3612 		goto out;
3613 	if (unlikely(addr > (unsigned long)high_memory - size))
3614 		goto out;
3615 	if (unlikely(addr & align_mask))
3616 		goto out;
3617 	if (unlikely(!kern_addr_valid(addr)))
3618 		goto out;
3619 	if (unlikely(!kern_addr_valid(addr + size - 1)))
3620 		goto out;
3621 	page = virt_to_page(ptr);
3622 	if (unlikely(!PageSlab(page)))
3623 		goto out;
3624 	if (unlikely(page_get_cache(page) != cachep))
3625 		goto out;
3626 	return 1;
3627 out:
3628 	return 0;
3629 }
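/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * holding a possibly stale pointer can reject obvious garbage before
 * dereferencing it; dentry_cachep and candidate are hypothetical names.
 *
 *	if (!kmem_ptr_validate(dentry_cachep, candidate))
 *		return NULL;	(definitely not a valid object)
 *
 * A return value of 1 is only a plausibility check, not proof that the
 * pointer is a live object of that cache.
 */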
3630 
3631 #ifdef CONFIG_NUMA
3632 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3633 {
3634 	return __cache_alloc_node(cachep, flags, nodeid,
3635 			__builtin_return_address(0));
3636 }
3637 EXPORT_SYMBOL(kmem_cache_alloc_node);
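/*
 * Usage sketch (illustrative, not part of the original source): allocate
 * an object from memory local to the node that will use it; foo_cachep
 * and target_cpu are hypothetical.
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL,
 *					      cpu_to_node(target_cpu));
 */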
3638 
3639 static __always_inline void *
3640 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3641 {
3642 	struct kmem_cache *cachep;
3643 
3644 	cachep = kmem_find_general_cachep(size, flags);
3645 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3646 		return cachep;
3647 	return kmem_cache_alloc_node(cachep, flags, node);
3648 }
3649 
3650 #ifdef CONFIG_DEBUG_SLAB
3651 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3652 {
3653 	return __do_kmalloc_node(size, flags, node,
3654 			__builtin_return_address(0));
3655 }
3656 EXPORT_SYMBOL(__kmalloc_node);
3657 
3658 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3659 		int node, void *caller)
3660 {
3661 	return __do_kmalloc_node(size, flags, node, caller);
3662 }
3663 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3664 #else
3665 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3666 {
3667 	return __do_kmalloc_node(size, flags, node, NULL);
3668 }
3669 EXPORT_SYMBOL(__kmalloc_node);
3670 #endif /* CONFIG_DEBUG_SLAB */
3671 #endif /* CONFIG_NUMA */
3672 
3673 /**
3674  * __do_kmalloc - allocate memory
3675  * @size: how many bytes of memory are required.
3676  * @flags: the type of memory to allocate (see kmalloc).
3677  * @caller: function caller for debug tracking of the caller
3678  */
3679 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3680 					  void *caller)
3681 {
3682 	struct kmem_cache *cachep;
3683 
3684 	/* If you want to save a few bytes of .text space: replace
3685 	 * __ with kmem_.
3686 	 * Then kmalloc uses the uninlined functions instead of the inlined
3687 	 * ones.
3688 	 */
3689 	cachep = __find_general_cachep(size, flags);
3690 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3691 		return cachep;
3692 	return __cache_alloc(cachep, flags, caller);
3693 }
3694 
3695 
3696 #ifdef CONFIG_DEBUG_SLAB
3697 void *__kmalloc(size_t size, gfp_t flags)
3698 {
3699 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3700 }
3701 EXPORT_SYMBOL(__kmalloc);
3702 
3703 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3704 {
3705 	return __do_kmalloc(size, flags, caller);
3706 }
3707 EXPORT_SYMBOL(__kmalloc_track_caller);
3708 
3709 #else
3710 void *__kmalloc(size_t size, gfp_t flags)
3711 {
3712 	return __do_kmalloc(size, flags, NULL);
3713 }
3714 EXPORT_SYMBOL(__kmalloc);
3715 #endif
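/*
 * Note (illustrative, not part of the original source): kmalloc() ends up
 * in __kmalloc()/__do_kmalloc(), which maps the requested size onto the
 * smallest general cache that fits and allocates from it; kfree() later
 * recovers the cache from the object's page.
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... fill and use buf ...
 *	kfree(buf);
 */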
3716 
3717 /**
3718  * kmem_cache_free - Deallocate an object
3719  * @cachep: The cache the allocation was from.
3720  * @objp: The previously allocated object.
3721  *
3722  * Free an object which was previously allocated from this
3723  * cache.
3724  */
3725 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3726 {
3727 	unsigned long flags;
3728 
3729 	BUG_ON(virt_to_cache(objp) != cachep);
3730 
3731 	local_irq_save(flags);
3732 	debug_check_no_locks_freed(objp, obj_size(cachep));
3733 	__cache_free(cachep, objp);
3734 	local_irq_restore(flags);
3735 }
3736 EXPORT_SYMBOL(kmem_cache_free);
3737 
3738 /**
3739  * kfree - free previously allocated memory
3740  * @objp: pointer returned by kmalloc.
3741  *
3742  * If @objp is NULL, no operation is performed.
3743  *
3744  * Don't free memory not originally allocated by kmalloc()
3745  * or you will run into trouble.
3746  */
3747 void kfree(const void *objp)
3748 {
3749 	struct kmem_cache *c;
3750 	unsigned long flags;
3751 
3752 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3753 		return;
3754 	local_irq_save(flags);
3755 	kfree_debugcheck(objp);
3756 	c = virt_to_cache(objp);
3757 	debug_check_no_locks_freed(objp, obj_size(c));
3758 	__cache_free(c, (void *)objp);
3759 	local_irq_restore(flags);
3760 }
3761 EXPORT_SYMBOL(kfree);
3762 
3763 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3764 {
3765 	return obj_size(cachep);
3766 }
3767 EXPORT_SYMBOL(kmem_cache_size);
3768 
3769 const char *kmem_cache_name(struct kmem_cache *cachep)
3770 {
3771 	return cachep->name;
3772 }
3773 EXPORT_SYMBOL_GPL(kmem_cache_name);
3774 
3775 /*
3776  * This initializes kmem_list3 or resizes various caches for all nodes.
3777  */
3778 static int alloc_kmemlist(struct kmem_cache *cachep)
3779 {
3780 	int node;
3781 	struct kmem_list3 *l3;
3782 	struct array_cache *new_shared;
3783 	struct array_cache **new_alien = NULL;
3784 
3785 	for_each_online_node(node) {
3786 
3787 		if (use_alien_caches) {
3788 			new_alien = alloc_alien_cache(node, cachep->limit);
3789 			if (!new_alien)
3790 				goto fail;
3791 		}
3792 
3793 		new_shared = NULL;
3794 		if (cachep->shared) {
3795 			new_shared = alloc_arraycache(node,
3796 				cachep->shared*cachep->batchcount,
3797 					0xbaadf00d);
3798 			if (!new_shared) {
3799 				free_alien_cache(new_alien);
3800 				goto fail;
3801 			}
3802 		}
3803 
3804 		l3 = cachep->nodelists[node];
3805 		if (l3) {
3806 			struct array_cache *shared = l3->shared;
3807 
3808 			spin_lock_irq(&l3->list_lock);
3809 
3810 			if (shared)
3811 				free_block(cachep, shared->entry,
3812 						shared->avail, node);
3813 
3814 			l3->shared = new_shared;
3815 			if (!l3->alien) {
3816 				l3->alien = new_alien;
3817 				new_alien = NULL;
3818 			}
3819 			l3->free_limit = (1 + nr_cpus_node(node)) *
3820 					cachep->batchcount + cachep->num;
3821 			spin_unlock_irq(&l3->list_lock);
3822 			kfree(shared);
3823 			free_alien_cache(new_alien);
3824 			continue;
3825 		}
3826 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3827 		if (!l3) {
3828 			free_alien_cache(new_alien);
3829 			kfree(new_shared);
3830 			goto fail;
3831 		}
3832 
3833 		kmem_list3_init(l3);
3834 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3835 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3836 		l3->shared = new_shared;
3837 		l3->alien = new_alien;
3838 		l3->free_limit = (1 + nr_cpus_node(node)) *
3839 					cachep->batchcount + cachep->num;
3840 		cachep->nodelists[node] = l3;
3841 	}
3842 	return 0;
3843 
3844 fail:
3845 	if (!cachep->next.next) {
3846 		/* Cache is not active yet. Roll back what we did */
3847 		node--;
3848 		while (node >= 0) {
3849 			if (cachep->nodelists[node]) {
3850 				l3 = cachep->nodelists[node];
3851 
3852 				kfree(l3->shared);
3853 				free_alien_cache(l3->alien);
3854 				kfree(l3);
3855 				cachep->nodelists[node] = NULL;
3856 			}
3857 			node--;
3858 		}
3859 	}
3860 	return -ENOMEM;
3861 }
3862 
3863 struct ccupdate_struct {
3864 	struct kmem_cache *cachep;
3865 	struct array_cache *new[NR_CPUS];
3866 };
3867 
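/*
 * do_ccupdate_local() runs on each cpu (via on_each_cpu() in
 * do_tune_cpucache() below) and swaps that cpu's array_cache with the
 * newly allocated one from the ccupdate_struct, so the old arrays can be
 * drained and freed afterwards without racing against the local
 * alloc/free fast paths.
 */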
3868 static void do_ccupdate_local(void *info)
3869 {
3870 	struct ccupdate_struct *new = info;
3871 	struct array_cache *old;
3872 
3873 	check_irq_off();
3874 	old = cpu_cache_get(new->cachep);
3875 
3876 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3877 	new->new[smp_processor_id()] = old;
3878 }
3879 
3880 /* Always called with the cache_chain_mutex held */
3881 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3882 				int batchcount, int shared)
3883 {
3884 	struct ccupdate_struct *new;
3885 	int i;
3886 
3887 	new = kzalloc(sizeof(*new), GFP_KERNEL);
3888 	if (!new)
3889 		return -ENOMEM;
3890 
3891 	for_each_online_cpu(i) {
3892 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
3893 						batchcount);
3894 		if (!new->new[i]) {
3895 			for (i--; i >= 0; i--)
3896 				kfree(new->new[i]);
3897 			kfree(new);
3898 			return -ENOMEM;
3899 		}
3900 	}
3901 	new->cachep = cachep;
3902 
3903 	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
3904 
3905 	check_irq_on();
3906 	cachep->batchcount = batchcount;
3907 	cachep->limit = limit;
3908 	cachep->shared = shared;
3909 
3910 	for_each_online_cpu(i) {
3911 		struct array_cache *ccold = new->new[i];
3912 		if (!ccold)
3913 			continue;
3914 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3915 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3916 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3917 		kfree(ccold);
3918 	}
3919 	kfree(new);
3920 	return alloc_kmemlist(cachep);
3921 }
3922 
3923 /* Called with cache_chain_mutex held always */
3924 static int enable_cpucache(struct kmem_cache *cachep)
3925 {
3926 	int err;
3927 	int limit, shared;
3928 
3929 	/*
3930 	 * The head array serves three purposes:
3931 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3932 	 * - reduce the number of spinlock operations.
3933 	 * - reduce the number of linked list operations on the slab and
3934 	 *   bufctl chains: array operations are cheaper.
3935 	 * The numbers are guessed, we should auto-tune as described by
3936 	 * Bonwick.
3937 	 */
3938 	if (cachep->buffer_size > 131072)
3939 		limit = 1;
3940 	else if (cachep->buffer_size > PAGE_SIZE)
3941 		limit = 8;
3942 	else if (cachep->buffer_size > 1024)
3943 		limit = 24;
3944 	else if (cachep->buffer_size > 256)
3945 		limit = 54;
3946 	else
3947 		limit = 120;
3948 
3949 	/*
3950 	 * CPU bound tasks (e.g. network routing) can exhibit lopsided
3951 	 * allocation behaviour: most allocs on one cpu, most free operations
3952 	 * on another cpu.  For these cases, efficient object passing between
3953 	 * cpus is necessary.  This is provided by a shared array, which
3954 	 * replaces Bonwick's magazine layer.
3955 	 * On uniprocessor, it is functionally equivalent (but less efficient)
3956 	 * to a larger limit, so it is disabled by default there.
3957 	 */
3958 	shared = 0;
3959 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
3960 		shared = 8;
3961 
3962 #if DEBUG
3963 	/*
3964 	 * With debugging enabled, a large batchcount leads to excessively long
3965 	 * periods with local interrupts disabled.  Limit the batchcount.
3966 	 */
3967 	if (limit > 32)
3968 		limit = 32;
3969 #endif
3970 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
3971 	if (err)
3972 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3973 		       cachep->name, -err);
3974 	return err;
3975 }
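/*
 * Worked example (illustrative, not part of the original source): a cache
 * with 512 byte objects gets limit = 54 from the table above, so the
 * batchcount passed to do_tune_cpucache() is (54 + 1) / 2 = 27; on SMP
 * the per-node shared array is later sized shared * batchcount =
 * 8 * 27 = 216 entries by alloc_kmemlist().
 */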
3976 
3977 /*
3978  * Drain an array if it contains any elements, taking the l3 lock only if
3979  * necessary.  Note that the l3 list_lock also protects the array_cache
3980  * when drain_array() is used on the shared array.
3981  */
3982 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3983 			 struct array_cache *ac, int force, int node)
3984 {
3985 	int tofree;
3986 
3987 	if (!ac || !ac->avail)
3988 		return;
3989 	if (ac->touched && !force) {
3990 		ac->touched = 0;
3991 	} else {
3992 		spin_lock_irq(&l3->list_lock);
3993 		if (ac->avail) {
3994 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3995 			if (tofree > ac->avail)
3996 				tofree = (ac->avail + 1) / 2;
3997 			free_block(cachep, ac->entry, tofree, node);
3998 			ac->avail -= tofree;
3999 			memmove(ac->entry, &(ac->entry[tofree]),
4000 				sizeof(void *) * ac->avail);
4001 		}
4002 		spin_unlock_irq(&l3->list_lock);
4003 	}
4004 }
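/*
 * Worked example (illustrative, not part of the original source): for a
 * per-cpu array with limit 120 holding 30 objects that were not touched
 * since the last pass, a non-forced drain frees (120 + 4) / 5 = 24
 * objects and leaves 6; with force set, all 30 are freed.
 */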
4005 
4006 /**
4007  * cache_reap - Reclaim memory from caches.
4008  * @w: work descriptor
4009  *
4010  * Called from workqueue/eventd every few seconds.
4011  * Purpose:
4012  * - clear the per-cpu caches for this CPU.
4013  * - return freeable pages to the main free memory pool.
4014  *
4015  * If we cannot acquire the cache chain mutex then just give up - we'll try
4016  * again on the next iteration.
4017  */
4018 static void cache_reap(struct work_struct *w)
4019 {
4020 	struct kmem_cache *searchp;
4021 	struct kmem_list3 *l3;
4022 	int node = numa_node_id();
4023 	struct delayed_work *work =
4024 		container_of(w, struct delayed_work, work);
4025 
4026 	if (!mutex_trylock(&cache_chain_mutex))
4027 		/* Give up. Set up the next iteration. */
4028 		goto out;
4029 
4030 	list_for_each_entry(searchp, &cache_chain, next) {
4031 		check_irq_on();
4032 
4033 		/*
4034 		 * We only take the l3 lock if absolutely necessary and we
4035 		 * have established with reasonable certainty that
4036 		 * we can do some work if the lock was obtained.
4037 		 */
4038 		l3 = searchp->nodelists[node];
4039 
4040 		reap_alien(searchp, l3);
4041 
4042 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4043 
4044 		/*
4045 		 * These are racy checks but it does not matter
4046 		 * if we skip one check or scan twice.
4047 		 */
4048 		if (time_after(l3->next_reap, jiffies))
4049 			goto next;
4050 
4051 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4052 
4053 		drain_array(searchp, l3, l3->shared, 0, node);
4054 
4055 		if (l3->free_touched)
4056 			l3->free_touched = 0;
4057 		else {
4058 			int freed;
4059 
4060 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4061 				5 * searchp->num - 1) / (5 * searchp->num));
4062 			STATS_ADD_REAPED(searchp, freed);
4063 		}
4064 next:
4065 		cond_resched();
4066 	}
4067 	check_irq_on();
4068 	mutex_unlock(&cache_chain_mutex);
4069 	next_reap_node();
4070 out:
4071 	/* Set up the next iteration */
4072 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4073 }
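/*
 * Worked example (illustrative, not part of the original source): with
 * free_limit = 244 and 30 objects per slab, the drain target above is
 * (244 + 150 - 1) / 150 = 2 slabs per pass, i.e. roughly a fifth of the
 * node's free limit, so idle caches shrink gradually rather than all at
 * once.
 */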
4074 
4075 #ifdef CONFIG_PROC_FS
4076 
4077 static void print_slabinfo_header(struct seq_file *m)
4078 {
4079 	/*
4080 	 * Output format version, so at least we can change it
4081 	 * without _too_ many complaints.
4082 	 */
4083 #if STATS
4084 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4085 #else
4086 	seq_puts(m, "slabinfo - version: 2.1\n");
4087 #endif
4088 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4089 		 "<objperslab> <pagesperslab>");
4090 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4091 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4092 #if STATS
4093 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4094 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4095 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4096 #endif
4097 	seq_putc(m, '\n');
4098 }
4099 
4100 static void *s_start(struct seq_file *m, loff_t *pos)
4101 {
4102 	loff_t n = *pos;
4103 
4104 	mutex_lock(&cache_chain_mutex);
4105 	if (!n)
4106 		print_slabinfo_header(m);
4107 
4108 	return seq_list_start(&cache_chain, *pos);
4109 }
4110 
4111 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4112 {
4113 	return seq_list_next(p, &cache_chain, pos);
4114 }
4115 
4116 static void s_stop(struct seq_file *m, void *p)
4117 {
4118 	mutex_unlock(&cache_chain_mutex);
4119 }
4120 
4121 static int s_show(struct seq_file *m, void *p)
4122 {
4123 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4124 	struct slab *slabp;
4125 	unsigned long active_objs;
4126 	unsigned long num_objs;
4127 	unsigned long active_slabs = 0;
4128 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4129 	const char *name;
4130 	char *error = NULL;
4131 	int node;
4132 	struct kmem_list3 *l3;
4133 
4134 	active_objs = 0;
4135 	num_slabs = 0;
4136 	for_each_online_node(node) {
4137 		l3 = cachep->nodelists[node];
4138 		if (!l3)
4139 			continue;
4140 
4141 		check_irq_on();
4142 		spin_lock_irq(&l3->list_lock);
4143 
4144 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4145 			if (slabp->inuse != cachep->num && !error)
4146 				error = "slabs_full accounting error";
4147 			active_objs += cachep->num;
4148 			active_slabs++;
4149 		}
4150 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4151 			if (slabp->inuse == cachep->num && !error)
4152 				error = "slabs_partial inuse accounting error";
4153 			if (!slabp->inuse && !error)
4154 				error = "slabs_partial/inuse accounting error";
4155 			active_objs += slabp->inuse;
4156 			active_slabs++;
4157 		}
4158 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4159 			if (slabp->inuse && !error)
4160 				error = "slabs_free/inuse accounting error";
4161 			num_slabs++;
4162 		}
4163 		free_objects += l3->free_objects;
4164 		if (l3->shared)
4165 			shared_avail += l3->shared->avail;
4166 
4167 		spin_unlock_irq(&l3->list_lock);
4168 	}
4169 	num_slabs += active_slabs;
4170 	num_objs = num_slabs * cachep->num;
4171 	if (num_objs - active_objs != free_objects && !error)
4172 		error = "free_objects accounting error";
4173 
4174 	name = cachep->name;
4175 	if (error)
4176 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4177 
4178 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4179 		   name, active_objs, num_objs, cachep->buffer_size,
4180 		   cachep->num, (1 << cachep->gfporder));
4181 	seq_printf(m, " : tunables %4u %4u %4u",
4182 		   cachep->limit, cachep->batchcount, cachep->shared);
4183 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4184 		   active_slabs, num_slabs, shared_avail);
4185 #if STATS
4186 	{			/* list3 stats */
4187 		unsigned long high = cachep->high_mark;
4188 		unsigned long allocs = cachep->num_allocations;
4189 		unsigned long grown = cachep->grown;
4190 		unsigned long reaped = cachep->reaped;
4191 		unsigned long errors = cachep->errors;
4192 		unsigned long max_freeable = cachep->max_freeable;
4193 		unsigned long node_allocs = cachep->node_allocs;
4194 		unsigned long node_frees = cachep->node_frees;
4195 		unsigned long overflows = cachep->node_overflow;
4196 
4197 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4198 				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
4199 				reaped, errors, max_freeable, node_allocs,
4200 				node_frees, overflows);
4201 	}
4202 	/* cpu stats */
4203 	{
4204 		unsigned long allochit = atomic_read(&cachep->allochit);
4205 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4206 		unsigned long freehit = atomic_read(&cachep->freehit);
4207 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4208 
4209 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4210 			   allochit, allocmiss, freehit, freemiss);
4211 	}
4212 #endif
4213 	seq_putc(m, '\n');
4214 	return 0;
4215 }
4216 
4217 /*
4218  * slabinfo_op - iterator that generates /proc/slabinfo
4219  *
4220  * Output layout:
4221  * cache-name
4222  * num-active-objs
4223  * total-objs
4224  * object size
4225  * num-objs-per-slab
4226  * num-pages-per-slab
4227  * tunables and slabdata fields
4228  * + further values on SMP and with statistics enabled
4229  */
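/*
 * Example line (illustrative, figures made up):
 *
 * dentry            120    180    192   20    1 : tunables  120   60    8 : slabdata      9      9      0
 *
 * i.e. 120 of 180 dentry objects are in use, 192 bytes each, 20 objects
 * per one-page slab, with per-cpu limit 120, batchcount 60, shared factor
 * 8, and 9 slabs in total.
 */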
4230 
4231 const struct seq_operations slabinfo_op = {
4232 	.start = s_start,
4233 	.next = s_next,
4234 	.stop = s_stop,
4235 	.show = s_show,
4236 };
4237 
4238 #define MAX_SLABINFO_WRITE 128
4239 /**
4240  * slabinfo_write - Tuning for the slab allocator
4241  * @file: unused
4242  * @buffer: user buffer
4243  * @count: data length
4244  * @ppos: unused
4245  */
4246 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4247 		       size_t count, loff_t *ppos)
4248 {
4249 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4250 	int limit, batchcount, shared, res;
4251 	struct kmem_cache *cachep;
4252 
4253 	if (count > MAX_SLABINFO_WRITE)
4254 		return -EINVAL;
4255 	if (copy_from_user(&kbuf, buffer, count))
4256 		return -EFAULT;
4257 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4258 
4259 	tmp = strchr(kbuf, ' ');
4260 	if (!tmp)
4261 		return -EINVAL;
4262 	*tmp = '\0';
4263 	tmp++;
4264 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4265 		return -EINVAL;
4266 
4267 	/* Find the cache in the chain of caches. */
4268 	mutex_lock(&cache_chain_mutex);
4269 	res = -EINVAL;
4270 	list_for_each_entry(cachep, &cache_chain, next) {
4271 		if (!strcmp(cachep->name, kbuf)) {
4272 			if (limit < 1 || batchcount < 1 ||
4273 					batchcount > limit || shared < 0) {
4274 				res = 0;
4275 			} else {
4276 				res = do_tune_cpucache(cachep, limit,
4277 						       batchcount, shared);
4278 			}
4279 			break;
4280 		}
4281 	}
4282 	mutex_unlock(&cache_chain_mutex);
4283 	if (res >= 0)
4284 		res = count;
4285 	return res;
4286 }
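/*
 * Usage sketch (illustrative, not part of the original source): the input
 * format is "<cache name> <limit> <batchcount> <shared>", e.g. from a
 * shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * Values that fail the sanity checks above are silently ignored: the
 * write still returns success, but the cache is not retuned.
 */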
4287 
4288 #ifdef CONFIG_DEBUG_SLAB_LEAK
4289 
4290 static void *leaks_start(struct seq_file *m, loff_t *pos)
4291 {
4292 	mutex_lock(&cache_chain_mutex);
4293 	return seq_list_start(&cache_chain, *pos);
4294 }
4295 
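/*
 * The leak table in m->private is a flat array of unsigned longs:
 * n[0] is the capacity in (caller, count) pairs, n[1] the number of pairs
 * in use, and n[2..] the pairs themselves, kept sorted by caller address.
 * add_caller() binary-searches the pairs and either bumps an existing
 * count or shifts the tail to insert a new pair, returning 0 once the
 * table is full; handle_slab() feeds it the recorded caller
 * (*dbg_userword()) of every active object in a slab.
 */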
4296 static inline int add_caller(unsigned long *n, unsigned long v)
4297 {
4298 	unsigned long *p;
4299 	int l;
4300 	if (!v)
4301 		return 1;
4302 	l = n[1];
4303 	p = n + 2;
4304 	while (l) {
4305 		int i = l/2;
4306 		unsigned long *q = p + 2 * i;
4307 		if (*q == v) {
4308 			q[1]++;
4309 			return 1;
4310 		}
4311 		if (*q > v) {
4312 			l = i;
4313 		} else {
4314 			p = q + 2;
4315 			l -= i + 1;
4316 		}
4317 	}
4318 	if (++n[1] == n[0])
4319 		return 0;
4320 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4321 	p[0] = v;
4322 	p[1] = 1;
4323 	return 1;
4324 }
4325 
4326 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4327 {
4328 	void *p;
4329 	int i;
4330 	if (n[0] == n[1])
4331 		return;
4332 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4333 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4334 			continue;
4335 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4336 			return;
4337 	}
4338 }
4339 
4340 static void show_symbol(struct seq_file *m, unsigned long address)
4341 {
4342 #ifdef CONFIG_KALLSYMS
4343 	unsigned long offset, size;
4344 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4345 
4346 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4347 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4348 		if (modname[0])
4349 			seq_printf(m, " [%s]", modname);
4350 		return;
4351 	}
4352 #endif
4353 	seq_printf(m, "%p", (void *)address);
4354 }
4355 
4356 static int leaks_show(struct seq_file *m, void *p)
4357 {
4358 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4359 	struct slab *slabp;
4360 	struct kmem_list3 *l3;
4361 	const char *name;
4362 	unsigned long *n = m->private;
4363 	int node;
4364 	int i;
4365 
4366 	if (!(cachep->flags & SLAB_STORE_USER))
4367 		return 0;
4368 	if (!(cachep->flags & SLAB_RED_ZONE))
4369 		return 0;
4370 
4371 	/* OK, we can do it */
4372 
4373 	n[1] = 0;
4374 
4375 	for_each_online_node(node) {
4376 		l3 = cachep->nodelists[node];
4377 		if (!l3)
4378 			continue;
4379 
4380 		check_irq_on();
4381 		spin_lock_irq(&l3->list_lock);
4382 
4383 		list_for_each_entry(slabp, &l3->slabs_full, list)
4384 			handle_slab(n, cachep, slabp);
4385 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4386 			handle_slab(n, cachep, slabp);
4387 		spin_unlock_irq(&l3->list_lock);
4388 	}
4389 	name = cachep->name;
4390 	if (n[0] == n[1]) {
4391 		/* Increase the buffer size */
4392 		mutex_unlock(&cache_chain_mutex);
4393 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4394 		if (!m->private) {
4395 			/* Too bad, we are really out */
4396 			m->private = n;
4397 			mutex_lock(&cache_chain_mutex);
4398 			return -ENOMEM;
4399 		}
4400 		*(unsigned long *)m->private = n[0] * 2;
4401 		kfree(n);
4402 		mutex_lock(&cache_chain_mutex);
4403 		/* Now make sure this entry will be retried */
4404 		m->count = m->size;
4405 		return 0;
4406 	}
4407 	for (i = 0; i < n[1]; i++) {
4408 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4409 		show_symbol(m, n[2*i+2]);
4410 		seq_putc(m, '\n');
4411 	}
4412 
4413 	return 0;
4414 }
4415 
4416 const struct seq_operations slabstats_op = {
4417 	.start = leaks_start,
4418 	.next = s_next,
4419 	.stop = s_stop,
4420 	.show = leaks_show,
4421 };
4422 #endif
4423 #endif
4424 
4425 /**
4426  * ksize - get the actual amount of memory allocated for a given object
4427  * @objp: Pointer to the object
4428  *
4429  * kmalloc may internally round up allocations and return more memory
4430  * than requested. ksize() can be used to determine the actual amount of
4431  * memory allocated. The caller may use this additional memory, even though
4432  * a smaller amount of memory was initially specified with the kmalloc call.
4433  * The caller must guarantee that objp points to a valid object previously
4434  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4435  * must not be freed during the duration of the call.
4436  */
4437 size_t ksize(const void *objp)
4438 {
4439 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
4440 		return 0;
4441 
4442 	return obj_size(virt_to_cache(objp));
4443 }
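/*
 * Usage sketch (illustrative, not part of the original source): kmalloc()
 * rounds the request up to the next general cache size and ksize()
 * reports that rounded size, so the extra bytes may legitimately be used:
 *
 *	char *buf = kmalloc(100, GFP_KERNEL);
 *	size_t real = ksize(buf);	(e.g. 128 with the default caches)
 *	memset(buf, 0, real);		(valid: the whole object is ours)
 */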
4444