xref: /openbmc/linux/mm/slab.c (revision 81d67439)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists out of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means, that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise from empty slabs or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with enabled local interrupts -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<asm/cacheflush.h>
121 #include	<asm/tlbflush.h>
122 #include	<asm/page.h>
123 
124 /*
125  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
126  *		  0 for faster, smaller code (especially in the critical paths).
127  *
128  * STATS	- 1 to collect stats for /proc/slabinfo.
129  *		  0 for faster, smaller code (especially in the critical paths).
130  *
131  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
132  */
133 
134 #ifdef CONFIG_DEBUG_SLAB
135 #define	DEBUG		1
136 #define	STATS		1
137 #define	FORCED_DEBUG	1
138 #else
139 #define	DEBUG		0
140 #define	STATS		0
141 #define	FORCED_DEBUG	0
142 #endif
143 
144 /* Shouldn't this be in a header file somewhere? */
145 #define	BYTES_PER_WORD		sizeof(void *)
146 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
147 
148 #ifndef ARCH_KMALLOC_FLAGS
149 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
150 #endif
151 
152 /* Legal flag mask for kmem_cache_create(). */
153 #if DEBUG
154 # define CREATE_MASK	(SLAB_RED_ZONE | \
155 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
156 			 SLAB_CACHE_DMA | \
157 			 SLAB_STORE_USER | \
158 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
159 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
160 			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
161 #else
162 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
163 			 SLAB_CACHE_DMA | \
164 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
165 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
166 			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
167 #endif
168 
169 /*
170  * kmem_bufctl_t:
171  *
172  * Bufctl's are used for linking objs within a slab
173  * linked offsets.
174  *
175  * This implementation relies on "struct page" for locating the cache &
176  * slab an object belongs to.
177  * This allows the bufctl structure to be small (one int), but limits
178  * the number of objects a slab (not a cache) can contain when off-slab
179  * bufctls are used. The limit is the size of the largest general cache
180  * that does not use off-slab slabs.
181  * For 32bit archs with 4 kB pages, is this 56.
182  * This is not serious, as it is only for large objects, when it is unwise
183  * to have too many per slab.
184  * Note: This limit can be raised by introducing a general cache whose size
185  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
186  */
187 
188 typedef unsigned int kmem_bufctl_t;
189 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
190 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
191 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
192 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
193 
194 /*
195  * struct slab_rcu
196  *
197  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
198  * arrange for kmem_freepages to be called via RCU.  This is useful if
199  * we need to approach a kernel structure obliquely, from its address
200  * obtained without the usual locking.  We can lock the structure to
201  * stabilize it and check it's still at the given address, only if we
202  * can be sure that the memory has not been meanwhile reused for some
203  * other kind of object (which our subsystem's lock might corrupt).
204  *
205  * rcu_read_lock before reading the address, then rcu_read_unlock after
206  * taking the spinlock within the structure expected at that address.
207  */
208 struct slab_rcu {
209 	struct rcu_head head;
210 	struct kmem_cache *cachep;
211 	void *addr;
212 };
213 
214 /*
215  * struct slab
216  *
217  * Manages the objs in a slab. Placed either at the beginning of mem allocated
218  * for a slab, or allocated from an general cache.
219  * Slabs are chained into three list: fully used, partial, fully free slabs.
220  */
221 struct slab {
222 	union {
223 		struct {
224 			struct list_head list;
225 			unsigned long colouroff;
226 			void *s_mem;		/* including colour offset */
227 			unsigned int inuse;	/* num of objs active in slab */
228 			kmem_bufctl_t free;
229 			unsigned short nodeid;
230 		};
231 		struct slab_rcu __slab_cover_slab_rcu;
232 	};
233 };
234 
235 /*
236  * struct array_cache
237  *
238  * Purpose:
239  * - LIFO ordering, to hand out cache-warm objects from _alloc
240  * - reduce the number of linked list operations
241  * - reduce spinlock operations
242  *
243  * The limit is stored in the per-cpu structure to reduce the data cache
244  * footprint.
245  *
246  */
247 struct array_cache {
248 	unsigned int avail;
249 	unsigned int limit;
250 	unsigned int batchcount;
251 	unsigned int touched;
252 	spinlock_t lock;
253 	void *entry[];	/*
254 			 * Must have this definition in here for the proper
255 			 * alignment of array_cache. Also simplifies accessing
256 			 * the entries.
257 			 */
258 };
259 
260 /*
261  * bootstrap: The caches do not work without cpuarrays anymore, but the
262  * cpuarrays are allocated from the generic caches...
263  */
264 #define BOOT_CPUCACHE_ENTRIES	1
265 struct arraycache_init {
266 	struct array_cache cache;
267 	void *entries[BOOT_CPUCACHE_ENTRIES];
268 };
269 
270 /*
271  * The slab lists for all objects.
272  */
273 struct kmem_list3 {
274 	struct list_head slabs_partial;	/* partial list first, better asm code */
275 	struct list_head slabs_full;
276 	struct list_head slabs_free;
277 	unsigned long free_objects;
278 	unsigned int free_limit;
279 	unsigned int colour_next;	/* Per-node cache coloring */
280 	spinlock_t list_lock;
281 	struct array_cache *shared;	/* shared per node */
282 	struct array_cache **alien;	/* on other nodes */
283 	unsigned long next_reap;	/* updated without locking */
284 	int free_touched;		/* updated without locking */
285 };
286 
287 /*
288  * Need this for bootstrapping a per node allocator.
289  */
290 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
291 static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
292 #define	CACHE_CACHE 0
293 #define	SIZE_AC MAX_NUMNODES
294 #define	SIZE_L3 (2 * MAX_NUMNODES)
295 
296 static int drain_freelist(struct kmem_cache *cache,
297 			struct kmem_list3 *l3, int tofree);
298 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
299 			int node);
300 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
301 static void cache_reap(struct work_struct *unused);
302 
303 /*
304  * This function must be completely optimized away if a constant is passed to
305  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
306  */
307 static __always_inline int index_of(const size_t size)
308 {
309 	extern void __bad_size(void);
310 
311 	if (__builtin_constant_p(size)) {
312 		int i = 0;
313 
314 #define CACHE(x) \
315 	if (size <=x) \
316 		return i; \
317 	else \
318 		i++;
319 #include <linux/kmalloc_sizes.h>
320 #undef CACHE
321 		__bad_size();
322 	} else
323 		__bad_size();
324 	return 0;
325 }
326 
327 static int slab_early_init = 1;
328 
329 #define INDEX_AC index_of(sizeof(struct arraycache_init))
330 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
331 
332 static void kmem_list3_init(struct kmem_list3 *parent)
333 {
334 	INIT_LIST_HEAD(&parent->slabs_full);
335 	INIT_LIST_HEAD(&parent->slabs_partial);
336 	INIT_LIST_HEAD(&parent->slabs_free);
337 	parent->shared = NULL;
338 	parent->alien = NULL;
339 	parent->colour_next = 0;
340 	spin_lock_init(&parent->list_lock);
341 	parent->free_objects = 0;
342 	parent->free_touched = 0;
343 }
344 
345 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
346 	do {								\
347 		INIT_LIST_HEAD(listp);					\
348 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
349 	} while (0)
350 
351 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
352 	do {								\
353 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
354 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
355 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
356 	} while (0)
357 
358 #define CFLGS_OFF_SLAB		(0x80000000UL)
359 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
360 
361 #define BATCHREFILL_LIMIT	16
362 /*
363  * Optimization question: fewer reaps means less probability for unnessary
364  * cpucache drain/refill cycles.
365  *
366  * OTOH the cpuarrays can contain lots of objects,
367  * which could lock up otherwise freeable slabs.
368  */
369 #define REAPTIMEOUT_CPUC	(2*HZ)
370 #define REAPTIMEOUT_LIST3	(4*HZ)
371 
372 #if STATS
373 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
374 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
375 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
376 #define	STATS_INC_GROWN(x)	((x)->grown++)
377 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
378 #define	STATS_SET_HIGH(x)						\
379 	do {								\
380 		if ((x)->num_active > (x)->high_mark)			\
381 			(x)->high_mark = (x)->num_active;		\
382 	} while (0)
383 #define	STATS_INC_ERR(x)	((x)->errors++)
384 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
385 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
386 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
387 #define	STATS_SET_FREEABLE(x, i)					\
388 	do {								\
389 		if ((x)->max_freeable < i)				\
390 			(x)->max_freeable = i;				\
391 	} while (0)
392 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
393 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
394 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
395 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
396 #else
397 #define	STATS_INC_ACTIVE(x)	do { } while (0)
398 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
399 #define	STATS_INC_ALLOCED(x)	do { } while (0)
400 #define	STATS_INC_GROWN(x)	do { } while (0)
401 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
402 #define	STATS_SET_HIGH(x)	do { } while (0)
403 #define	STATS_INC_ERR(x)	do { } while (0)
404 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
405 #define	STATS_INC_NODEFREES(x)	do { } while (0)
406 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
407 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
408 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
409 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
410 #define STATS_INC_FREEHIT(x)	do { } while (0)
411 #define STATS_INC_FREEMISS(x)	do { } while (0)
412 #endif
413 
414 #if DEBUG
415 
416 /*
417  * memory layout of objects:
418  * 0		: objp
419  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
420  * 		the end of an object is aligned with the end of the real
421  * 		allocation. Catches writes behind the end of the allocation.
422  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
423  * 		redzone word.
424  * cachep->obj_offset: The real object.
425  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
426  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
427  *					[BYTES_PER_WORD long]
428  */
429 static int obj_offset(struct kmem_cache *cachep)
430 {
431 	return cachep->obj_offset;
432 }
433 
434 static int obj_size(struct kmem_cache *cachep)
435 {
436 	return cachep->obj_size;
437 }
438 
439 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
440 {
441 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
442 	return (unsigned long long*) (objp + obj_offset(cachep) -
443 				      sizeof(unsigned long long));
444 }
445 
446 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
447 {
448 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
449 	if (cachep->flags & SLAB_STORE_USER)
450 		return (unsigned long long *)(objp + cachep->buffer_size -
451 					      sizeof(unsigned long long) -
452 					      REDZONE_ALIGN);
453 	return (unsigned long long *) (objp + cachep->buffer_size -
454 				       sizeof(unsigned long long));
455 }
456 
457 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
458 {
459 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
460 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
461 }
462 
463 #else
464 
465 #define obj_offset(x)			0
466 #define obj_size(cachep)		(cachep->buffer_size)
467 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
468 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
469 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
470 
471 #endif
472 
473 #ifdef CONFIG_TRACING
474 size_t slab_buffer_size(struct kmem_cache *cachep)
475 {
476 	return cachep->buffer_size;
477 }
478 EXPORT_SYMBOL(slab_buffer_size);
479 #endif
480 
481 /*
482  * Do not go above this order unless 0 objects fit into the slab.
483  */
484 #define	BREAK_GFP_ORDER_HI	1
485 #define	BREAK_GFP_ORDER_LO	0
486 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
487 
488 /*
489  * Functions for storing/retrieving the cachep and or slab from the page
490  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
491  * these are used to find the cache which an obj belongs to.
492  */
493 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
494 {
495 	page->lru.next = (struct list_head *)cache;
496 }
497 
498 static inline struct kmem_cache *page_get_cache(struct page *page)
499 {
500 	page = compound_head(page);
501 	BUG_ON(!PageSlab(page));
502 	return (struct kmem_cache *)page->lru.next;
503 }
504 
505 static inline void page_set_slab(struct page *page, struct slab *slab)
506 {
507 	page->lru.prev = (struct list_head *)slab;
508 }
509 
510 static inline struct slab *page_get_slab(struct page *page)
511 {
512 	BUG_ON(!PageSlab(page));
513 	return (struct slab *)page->lru.prev;
514 }
515 
516 static inline struct kmem_cache *virt_to_cache(const void *obj)
517 {
518 	struct page *page = virt_to_head_page(obj);
519 	return page_get_cache(page);
520 }
521 
522 static inline struct slab *virt_to_slab(const void *obj)
523 {
524 	struct page *page = virt_to_head_page(obj);
525 	return page_get_slab(page);
526 }
527 
528 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
529 				 unsigned int idx)
530 {
531 	return slab->s_mem + cache->buffer_size * idx;
532 }
533 
534 /*
535  * We want to avoid an expensive divide : (offset / cache->buffer_size)
536  *   Using the fact that buffer_size is a constant for a particular cache,
537  *   we can replace (offset / cache->buffer_size) by
538  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
539  */
540 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
541 					const struct slab *slab, void *obj)
542 {
543 	u32 offset = (obj - slab->s_mem);
544 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
545 }
546 
547 /*
548  * These are the default caches for kmalloc. Custom caches can have other sizes.
549  */
550 struct cache_sizes malloc_sizes[] = {
551 #define CACHE(x) { .cs_size = (x) },
552 #include <linux/kmalloc_sizes.h>
553 	CACHE(ULONG_MAX)
554 #undef CACHE
555 };
556 EXPORT_SYMBOL(malloc_sizes);
557 
558 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
559 struct cache_names {
560 	char *name;
561 	char *name_dma;
562 };
563 
564 static struct cache_names __initdata cache_names[] = {
565 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
566 #include <linux/kmalloc_sizes.h>
567 	{NULL,}
568 #undef CACHE
569 };
570 
571 static struct arraycache_init initarray_cache __initdata =
572     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
573 static struct arraycache_init initarray_generic =
574     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
575 
576 /* internal cache of cache description objs */
577 static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
578 static struct kmem_cache cache_cache = {
579 	.nodelists = cache_cache_nodelists,
580 	.batchcount = 1,
581 	.limit = BOOT_CPUCACHE_ENTRIES,
582 	.shared = 1,
583 	.buffer_size = sizeof(struct kmem_cache),
584 	.name = "kmem_cache",
585 };
586 
587 #define BAD_ALIEN_MAGIC 0x01020304ul
588 
589 /*
590  * chicken and egg problem: delay the per-cpu array allocation
591  * until the general caches are up.
592  */
593 static enum {
594 	NONE,
595 	PARTIAL_AC,
596 	PARTIAL_L3,
597 	EARLY,
598 	FULL
599 } g_cpucache_up;
600 
601 /*
602  * used by boot code to determine if it can use slab based allocator
603  */
604 int slab_is_available(void)
605 {
606 	return g_cpucache_up >= EARLY;
607 }
608 
609 #ifdef CONFIG_LOCKDEP
610 
611 /*
612  * Slab sometimes uses the kmalloc slabs to store the slab headers
613  * for other slabs "off slab".
614  * The locking for this is tricky in that it nests within the locks
615  * of all other slabs in a few places; to deal with this special
616  * locking we put on-slab caches into a separate lock-class.
617  *
618  * We set lock class for alien array caches which are up during init.
619  * The lock annotation will be lost if all cpus of a node goes down and
620  * then comes back up during hotplug
621  */
622 static struct lock_class_key on_slab_l3_key;
623 static struct lock_class_key on_slab_alc_key;
624 
625 static void init_node_lock_keys(int q)
626 {
627 	struct cache_sizes *s = malloc_sizes;
628 
629 	if (g_cpucache_up != FULL)
630 		return;
631 
632 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
633 		struct array_cache **alc;
634 		struct kmem_list3 *l3;
635 		int r;
636 
637 		l3 = s->cs_cachep->nodelists[q];
638 		if (!l3 || OFF_SLAB(s->cs_cachep))
639 			continue;
640 		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
641 		alc = l3->alien;
642 		/*
643 		 * FIXME: This check for BAD_ALIEN_MAGIC
644 		 * should go away when common slab code is taught to
645 		 * work even without alien caches.
646 		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
647 		 * for alloc_alien_cache,
648 		 */
649 		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
650 			continue;
651 		for_each_node(r) {
652 			if (alc[r])
653 				lockdep_set_class(&alc[r]->lock,
654 					&on_slab_alc_key);
655 		}
656 	}
657 }
658 
659 static inline void init_lock_keys(void)
660 {
661 	int node;
662 
663 	for_each_node(node)
664 		init_node_lock_keys(node);
665 }
666 #else
667 static void init_node_lock_keys(int q)
668 {
669 }
670 
671 static inline void init_lock_keys(void)
672 {
673 }
674 #endif
675 
676 /*
677  * Guard access to the cache-chain.
678  */
679 static DEFINE_MUTEX(cache_chain_mutex);
680 static struct list_head cache_chain;
681 
682 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
683 
684 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
685 {
686 	return cachep->array[smp_processor_id()];
687 }
688 
689 static inline struct kmem_cache *__find_general_cachep(size_t size,
690 							gfp_t gfpflags)
691 {
692 	struct cache_sizes *csizep = malloc_sizes;
693 
694 #if DEBUG
695 	/* This happens if someone tries to call
696 	 * kmem_cache_create(), or __kmalloc(), before
697 	 * the generic caches are initialized.
698 	 */
699 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
700 #endif
701 	if (!size)
702 		return ZERO_SIZE_PTR;
703 
704 	while (size > csizep->cs_size)
705 		csizep++;
706 
707 	/*
708 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
709 	 * has cs_{dma,}cachep==NULL. Thus no special case
710 	 * for large kmalloc calls required.
711 	 */
712 #ifdef CONFIG_ZONE_DMA
713 	if (unlikely(gfpflags & GFP_DMA))
714 		return csizep->cs_dmacachep;
715 #endif
716 	return csizep->cs_cachep;
717 }
718 
719 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
720 {
721 	return __find_general_cachep(size, gfpflags);
722 }
723 
724 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
725 {
726 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
727 }
728 
729 /*
730  * Calculate the number of objects and left-over bytes for a given buffer size.
731  */
732 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
733 			   size_t align, int flags, size_t *left_over,
734 			   unsigned int *num)
735 {
736 	int nr_objs;
737 	size_t mgmt_size;
738 	size_t slab_size = PAGE_SIZE << gfporder;
739 
740 	/*
741 	 * The slab management structure can be either off the slab or
742 	 * on it. For the latter case, the memory allocated for a
743 	 * slab is used for:
744 	 *
745 	 * - The struct slab
746 	 * - One kmem_bufctl_t for each object
747 	 * - Padding to respect alignment of @align
748 	 * - @buffer_size bytes for each object
749 	 *
750 	 * If the slab management structure is off the slab, then the
751 	 * alignment will already be calculated into the size. Because
752 	 * the slabs are all pages aligned, the objects will be at the
753 	 * correct alignment when allocated.
754 	 */
755 	if (flags & CFLGS_OFF_SLAB) {
756 		mgmt_size = 0;
757 		nr_objs = slab_size / buffer_size;
758 
759 		if (nr_objs > SLAB_LIMIT)
760 			nr_objs = SLAB_LIMIT;
761 	} else {
762 		/*
763 		 * Ignore padding for the initial guess. The padding
764 		 * is at most @align-1 bytes, and @buffer_size is at
765 		 * least @align. In the worst case, this result will
766 		 * be one greater than the number of objects that fit
767 		 * into the memory allocation when taking the padding
768 		 * into account.
769 		 */
770 		nr_objs = (slab_size - sizeof(struct slab)) /
771 			  (buffer_size + sizeof(kmem_bufctl_t));
772 
773 		/*
774 		 * This calculated number will be either the right
775 		 * amount, or one greater than what we want.
776 		 */
777 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
778 		       > slab_size)
779 			nr_objs--;
780 
781 		if (nr_objs > SLAB_LIMIT)
782 			nr_objs = SLAB_LIMIT;
783 
784 		mgmt_size = slab_mgmt_size(nr_objs, align);
785 	}
786 	*num = nr_objs;
787 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
788 }
789 
790 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
791 
792 static void __slab_error(const char *function, struct kmem_cache *cachep,
793 			char *msg)
794 {
795 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
796 	       function, cachep->name, msg);
797 	dump_stack();
798 }
799 
800 /*
801  * By default on NUMA we use alien caches to stage the freeing of
802  * objects allocated from other nodes. This causes massive memory
803  * inefficiencies when using fake NUMA setup to split memory into a
804  * large number of small nodes, so it can be disabled on the command
805  * line
806   */
807 
808 static int use_alien_caches __read_mostly = 1;
809 static int __init noaliencache_setup(char *s)
810 {
811 	use_alien_caches = 0;
812 	return 1;
813 }
814 __setup("noaliencache", noaliencache_setup);
815 
816 #ifdef CONFIG_NUMA
817 /*
818  * Special reaping functions for NUMA systems called from cache_reap().
819  * These take care of doing round robin flushing of alien caches (containing
820  * objects freed on different nodes from which they were allocated) and the
821  * flushing of remote pcps by calling drain_node_pages.
822  */
823 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
824 
825 static void init_reap_node(int cpu)
826 {
827 	int node;
828 
829 	node = next_node(cpu_to_mem(cpu), node_online_map);
830 	if (node == MAX_NUMNODES)
831 		node = first_node(node_online_map);
832 
833 	per_cpu(slab_reap_node, cpu) = node;
834 }
835 
836 static void next_reap_node(void)
837 {
838 	int node = __this_cpu_read(slab_reap_node);
839 
840 	node = next_node(node, node_online_map);
841 	if (unlikely(node >= MAX_NUMNODES))
842 		node = first_node(node_online_map);
843 	__this_cpu_write(slab_reap_node, node);
844 }
845 
846 #else
847 #define init_reap_node(cpu) do { } while (0)
848 #define next_reap_node(void) do { } while (0)
849 #endif
850 
851 /*
852  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
853  * via the workqueue/eventd.
854  * Add the CPU number into the expiration time to minimize the possibility of
855  * the CPUs getting into lockstep and contending for the global cache chain
856  * lock.
857  */
858 static void __cpuinit start_cpu_timer(int cpu)
859 {
860 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
861 
862 	/*
863 	 * When this gets called from do_initcalls via cpucache_init(),
864 	 * init_workqueues() has already run, so keventd will be setup
865 	 * at that time.
866 	 */
867 	if (keventd_up() && reap_work->work.func == NULL) {
868 		init_reap_node(cpu);
869 		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
870 		schedule_delayed_work_on(cpu, reap_work,
871 					__round_jiffies_relative(HZ, cpu));
872 	}
873 }
874 
875 static struct array_cache *alloc_arraycache(int node, int entries,
876 					    int batchcount, gfp_t gfp)
877 {
878 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
879 	struct array_cache *nc = NULL;
880 
881 	nc = kmalloc_node(memsize, gfp, node);
882 	/*
883 	 * The array_cache structures contain pointers to free object.
884 	 * However, when such objects are allocated or transferred to another
885 	 * cache the pointers are not cleared and they could be counted as
886 	 * valid references during a kmemleak scan. Therefore, kmemleak must
887 	 * not scan such objects.
888 	 */
889 	kmemleak_no_scan(nc);
890 	if (nc) {
891 		nc->avail = 0;
892 		nc->limit = entries;
893 		nc->batchcount = batchcount;
894 		nc->touched = 0;
895 		spin_lock_init(&nc->lock);
896 	}
897 	return nc;
898 }
899 
900 /*
901  * Transfer objects in one arraycache to another.
902  * Locking must be handled by the caller.
903  *
904  * Return the number of entries transferred.
905  */
906 static int transfer_objects(struct array_cache *to,
907 		struct array_cache *from, unsigned int max)
908 {
909 	/* Figure out how many entries to transfer */
910 	int nr = min3(from->avail, max, to->limit - to->avail);
911 
912 	if (!nr)
913 		return 0;
914 
915 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
916 			sizeof(void *) *nr);
917 
918 	from->avail -= nr;
919 	to->avail += nr;
920 	return nr;
921 }
922 
923 #ifndef CONFIG_NUMA
924 
925 #define drain_alien_cache(cachep, alien) do { } while (0)
926 #define reap_alien(cachep, l3) do { } while (0)
927 
928 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
929 {
930 	return (struct array_cache **)BAD_ALIEN_MAGIC;
931 }
932 
933 static inline void free_alien_cache(struct array_cache **ac_ptr)
934 {
935 }
936 
937 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
938 {
939 	return 0;
940 }
941 
942 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
943 		gfp_t flags)
944 {
945 	return NULL;
946 }
947 
948 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
949 		 gfp_t flags, int nodeid)
950 {
951 	return NULL;
952 }
953 
954 #else	/* CONFIG_NUMA */
955 
956 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
957 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
958 
959 static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
960 {
961 	struct array_cache **ac_ptr;
962 	int memsize = sizeof(void *) * nr_node_ids;
963 	int i;
964 
965 	if (limit > 1)
966 		limit = 12;
967 	ac_ptr = kzalloc_node(memsize, gfp, node);
968 	if (ac_ptr) {
969 		for_each_node(i) {
970 			if (i == node || !node_online(i))
971 				continue;
972 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
973 			if (!ac_ptr[i]) {
974 				for (i--; i >= 0; i--)
975 					kfree(ac_ptr[i]);
976 				kfree(ac_ptr);
977 				return NULL;
978 			}
979 		}
980 	}
981 	return ac_ptr;
982 }
983 
984 static void free_alien_cache(struct array_cache **ac_ptr)
985 {
986 	int i;
987 
988 	if (!ac_ptr)
989 		return;
990 	for_each_node(i)
991 	    kfree(ac_ptr[i]);
992 	kfree(ac_ptr);
993 }
994 
995 static void __drain_alien_cache(struct kmem_cache *cachep,
996 				struct array_cache *ac, int node)
997 {
998 	struct kmem_list3 *rl3 = cachep->nodelists[node];
999 
1000 	if (ac->avail) {
1001 		spin_lock(&rl3->list_lock);
1002 		/*
1003 		 * Stuff objects into the remote nodes shared array first.
1004 		 * That way we could avoid the overhead of putting the objects
1005 		 * into the free lists and getting them back later.
1006 		 */
1007 		if (rl3->shared)
1008 			transfer_objects(rl3->shared, ac, ac->limit);
1009 
1010 		free_block(cachep, ac->entry, ac->avail, node);
1011 		ac->avail = 0;
1012 		spin_unlock(&rl3->list_lock);
1013 	}
1014 }
1015 
1016 /*
1017  * Called from cache_reap() to regularly drain alien caches round robin.
1018  */
1019 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1020 {
1021 	int node = __this_cpu_read(slab_reap_node);
1022 
1023 	if (l3->alien) {
1024 		struct array_cache *ac = l3->alien[node];
1025 
1026 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1027 			__drain_alien_cache(cachep, ac, node);
1028 			spin_unlock_irq(&ac->lock);
1029 		}
1030 	}
1031 }
1032 
1033 static void drain_alien_cache(struct kmem_cache *cachep,
1034 				struct array_cache **alien)
1035 {
1036 	int i = 0;
1037 	struct array_cache *ac;
1038 	unsigned long flags;
1039 
1040 	for_each_online_node(i) {
1041 		ac = alien[i];
1042 		if (ac) {
1043 			spin_lock_irqsave(&ac->lock, flags);
1044 			__drain_alien_cache(cachep, ac, i);
1045 			spin_unlock_irqrestore(&ac->lock, flags);
1046 		}
1047 	}
1048 }
1049 
1050 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1051 {
1052 	struct slab *slabp = virt_to_slab(objp);
1053 	int nodeid = slabp->nodeid;
1054 	struct kmem_list3 *l3;
1055 	struct array_cache *alien = NULL;
1056 	int node;
1057 
1058 	node = numa_mem_id();
1059 
1060 	/*
1061 	 * Make sure we are not freeing a object from another node to the array
1062 	 * cache on this cpu.
1063 	 */
1064 	if (likely(slabp->nodeid == node))
1065 		return 0;
1066 
1067 	l3 = cachep->nodelists[node];
1068 	STATS_INC_NODEFREES(cachep);
1069 	if (l3->alien && l3->alien[nodeid]) {
1070 		alien = l3->alien[nodeid];
1071 		spin_lock(&alien->lock);
1072 		if (unlikely(alien->avail == alien->limit)) {
1073 			STATS_INC_ACOVERFLOW(cachep);
1074 			__drain_alien_cache(cachep, alien, nodeid);
1075 		}
1076 		alien->entry[alien->avail++] = objp;
1077 		spin_unlock(&alien->lock);
1078 	} else {
1079 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1080 		free_block(cachep, &objp, 1, nodeid);
1081 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1082 	}
1083 	return 1;
1084 }
1085 #endif
1086 
1087 /*
1088  * Allocates and initializes nodelists for a node on each slab cache, used for
1089  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
1090  * will be allocated off-node since memory is not yet online for the new node.
1091  * When hotplugging memory or a cpu, existing nodelists are not replaced if
1092  * already in use.
1093  *
1094  * Must hold cache_chain_mutex.
1095  */
1096 static int init_cache_nodelists_node(int node)
1097 {
1098 	struct kmem_cache *cachep;
1099 	struct kmem_list3 *l3;
1100 	const int memsize = sizeof(struct kmem_list3);
1101 
1102 	list_for_each_entry(cachep, &cache_chain, next) {
1103 		/*
1104 		 * Set up the size64 kmemlist for cpu before we can
1105 		 * begin anything. Make sure some other cpu on this
1106 		 * node has not already allocated this
1107 		 */
1108 		if (!cachep->nodelists[node]) {
1109 			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1110 			if (!l3)
1111 				return -ENOMEM;
1112 			kmem_list3_init(l3);
1113 			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1114 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1115 
1116 			/*
1117 			 * The l3s don't come and go as CPUs come and
1118 			 * go.  cache_chain_mutex is sufficient
1119 			 * protection here.
1120 			 */
1121 			cachep->nodelists[node] = l3;
1122 		}
1123 
1124 		spin_lock_irq(&cachep->nodelists[node]->list_lock);
1125 		cachep->nodelists[node]->free_limit =
1126 			(1 + nr_cpus_node(node)) *
1127 			cachep->batchcount + cachep->num;
1128 		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1129 	}
1130 	return 0;
1131 }
1132 
1133 static void __cpuinit cpuup_canceled(long cpu)
1134 {
1135 	struct kmem_cache *cachep;
1136 	struct kmem_list3 *l3 = NULL;
1137 	int node = cpu_to_mem(cpu);
1138 	const struct cpumask *mask = cpumask_of_node(node);
1139 
1140 	list_for_each_entry(cachep, &cache_chain, next) {
1141 		struct array_cache *nc;
1142 		struct array_cache *shared;
1143 		struct array_cache **alien;
1144 
1145 		/* cpu is dead; no one can alloc from it. */
1146 		nc = cachep->array[cpu];
1147 		cachep->array[cpu] = NULL;
1148 		l3 = cachep->nodelists[node];
1149 
1150 		if (!l3)
1151 			goto free_array_cache;
1152 
1153 		spin_lock_irq(&l3->list_lock);
1154 
1155 		/* Free limit for this kmem_list3 */
1156 		l3->free_limit -= cachep->batchcount;
1157 		if (nc)
1158 			free_block(cachep, nc->entry, nc->avail, node);
1159 
1160 		if (!cpumask_empty(mask)) {
1161 			spin_unlock_irq(&l3->list_lock);
1162 			goto free_array_cache;
1163 		}
1164 
1165 		shared = l3->shared;
1166 		if (shared) {
1167 			free_block(cachep, shared->entry,
1168 				   shared->avail, node);
1169 			l3->shared = NULL;
1170 		}
1171 
1172 		alien = l3->alien;
1173 		l3->alien = NULL;
1174 
1175 		spin_unlock_irq(&l3->list_lock);
1176 
1177 		kfree(shared);
1178 		if (alien) {
1179 			drain_alien_cache(cachep, alien);
1180 			free_alien_cache(alien);
1181 		}
1182 free_array_cache:
1183 		kfree(nc);
1184 	}
1185 	/*
1186 	 * In the previous loop, all the objects were freed to
1187 	 * the respective cache's slabs,  now we can go ahead and
1188 	 * shrink each nodelist to its limit.
1189 	 */
1190 	list_for_each_entry(cachep, &cache_chain, next) {
1191 		l3 = cachep->nodelists[node];
1192 		if (!l3)
1193 			continue;
1194 		drain_freelist(cachep, l3, l3->free_objects);
1195 	}
1196 }
1197 
1198 static int __cpuinit cpuup_prepare(long cpu)
1199 {
1200 	struct kmem_cache *cachep;
1201 	struct kmem_list3 *l3 = NULL;
1202 	int node = cpu_to_mem(cpu);
1203 	int err;
1204 
1205 	/*
1206 	 * We need to do this right in the beginning since
1207 	 * alloc_arraycache's are going to use this list.
1208 	 * kmalloc_node allows us to add the slab to the right
1209 	 * kmem_list3 and not this cpu's kmem_list3
1210 	 */
1211 	err = init_cache_nodelists_node(node);
1212 	if (err < 0)
1213 		goto bad;
1214 
1215 	/*
1216 	 * Now we can go ahead with allocating the shared arrays and
1217 	 * array caches
1218 	 */
1219 	list_for_each_entry(cachep, &cache_chain, next) {
1220 		struct array_cache *nc;
1221 		struct array_cache *shared = NULL;
1222 		struct array_cache **alien = NULL;
1223 
1224 		nc = alloc_arraycache(node, cachep->limit,
1225 					cachep->batchcount, GFP_KERNEL);
1226 		if (!nc)
1227 			goto bad;
1228 		if (cachep->shared) {
1229 			shared = alloc_arraycache(node,
1230 				cachep->shared * cachep->batchcount,
1231 				0xbaadf00d, GFP_KERNEL);
1232 			if (!shared) {
1233 				kfree(nc);
1234 				goto bad;
1235 			}
1236 		}
1237 		if (use_alien_caches) {
1238 			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1239 			if (!alien) {
1240 				kfree(shared);
1241 				kfree(nc);
1242 				goto bad;
1243 			}
1244 		}
1245 		cachep->array[cpu] = nc;
1246 		l3 = cachep->nodelists[node];
1247 		BUG_ON(!l3);
1248 
1249 		spin_lock_irq(&l3->list_lock);
1250 		if (!l3->shared) {
1251 			/*
1252 			 * We are serialised from CPU_DEAD or
1253 			 * CPU_UP_CANCELLED by the cpucontrol lock
1254 			 */
1255 			l3->shared = shared;
1256 			shared = NULL;
1257 		}
1258 #ifdef CONFIG_NUMA
1259 		if (!l3->alien) {
1260 			l3->alien = alien;
1261 			alien = NULL;
1262 		}
1263 #endif
1264 		spin_unlock_irq(&l3->list_lock);
1265 		kfree(shared);
1266 		free_alien_cache(alien);
1267 	}
1268 	init_node_lock_keys(node);
1269 
1270 	return 0;
1271 bad:
1272 	cpuup_canceled(cpu);
1273 	return -ENOMEM;
1274 }
1275 
1276 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1277 				    unsigned long action, void *hcpu)
1278 {
1279 	long cpu = (long)hcpu;
1280 	int err = 0;
1281 
1282 	switch (action) {
1283 	case CPU_UP_PREPARE:
1284 	case CPU_UP_PREPARE_FROZEN:
1285 		mutex_lock(&cache_chain_mutex);
1286 		err = cpuup_prepare(cpu);
1287 		mutex_unlock(&cache_chain_mutex);
1288 		break;
1289 	case CPU_ONLINE:
1290 	case CPU_ONLINE_FROZEN:
1291 		start_cpu_timer(cpu);
1292 		break;
1293 #ifdef CONFIG_HOTPLUG_CPU
1294   	case CPU_DOWN_PREPARE:
1295   	case CPU_DOWN_PREPARE_FROZEN:
1296 		/*
1297 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1298 		 * held so that if cache_reap() is invoked it cannot do
1299 		 * anything expensive but will only modify reap_work
1300 		 * and reschedule the timer.
1301 		*/
1302 		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1303 		/* Now the cache_reaper is guaranteed to be not running. */
1304 		per_cpu(slab_reap_work, cpu).work.func = NULL;
1305   		break;
1306   	case CPU_DOWN_FAILED:
1307   	case CPU_DOWN_FAILED_FROZEN:
1308 		start_cpu_timer(cpu);
1309   		break;
1310 	case CPU_DEAD:
1311 	case CPU_DEAD_FROZEN:
1312 		/*
1313 		 * Even if all the cpus of a node are down, we don't free the
1314 		 * kmem_list3 of any cache. This to avoid a race between
1315 		 * cpu_down, and a kmalloc allocation from another cpu for
1316 		 * memory from the node of the cpu going down.  The list3
1317 		 * structure is usually allocated from kmem_cache_create() and
1318 		 * gets destroyed at kmem_cache_destroy().
1319 		 */
1320 		/* fall through */
1321 #endif
1322 	case CPU_UP_CANCELED:
1323 	case CPU_UP_CANCELED_FROZEN:
1324 		mutex_lock(&cache_chain_mutex);
1325 		cpuup_canceled(cpu);
1326 		mutex_unlock(&cache_chain_mutex);
1327 		break;
1328 	}
1329 	return notifier_from_errno(err);
1330 }
1331 
1332 static struct notifier_block __cpuinitdata cpucache_notifier = {
1333 	&cpuup_callback, NULL, 0
1334 };
1335 
1336 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1337 /*
1338  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1339  * Returns -EBUSY if all objects cannot be drained so that the node is not
1340  * removed.
1341  *
1342  * Must hold cache_chain_mutex.
1343  */
1344 static int __meminit drain_cache_nodelists_node(int node)
1345 {
1346 	struct kmem_cache *cachep;
1347 	int ret = 0;
1348 
1349 	list_for_each_entry(cachep, &cache_chain, next) {
1350 		struct kmem_list3 *l3;
1351 
1352 		l3 = cachep->nodelists[node];
1353 		if (!l3)
1354 			continue;
1355 
1356 		drain_freelist(cachep, l3, l3->free_objects);
1357 
1358 		if (!list_empty(&l3->slabs_full) ||
1359 		    !list_empty(&l3->slabs_partial)) {
1360 			ret = -EBUSY;
1361 			break;
1362 		}
1363 	}
1364 	return ret;
1365 }
1366 
1367 static int __meminit slab_memory_callback(struct notifier_block *self,
1368 					unsigned long action, void *arg)
1369 {
1370 	struct memory_notify *mnb = arg;
1371 	int ret = 0;
1372 	int nid;
1373 
1374 	nid = mnb->status_change_nid;
1375 	if (nid < 0)
1376 		goto out;
1377 
1378 	switch (action) {
1379 	case MEM_GOING_ONLINE:
1380 		mutex_lock(&cache_chain_mutex);
1381 		ret = init_cache_nodelists_node(nid);
1382 		mutex_unlock(&cache_chain_mutex);
1383 		break;
1384 	case MEM_GOING_OFFLINE:
1385 		mutex_lock(&cache_chain_mutex);
1386 		ret = drain_cache_nodelists_node(nid);
1387 		mutex_unlock(&cache_chain_mutex);
1388 		break;
1389 	case MEM_ONLINE:
1390 	case MEM_OFFLINE:
1391 	case MEM_CANCEL_ONLINE:
1392 	case MEM_CANCEL_OFFLINE:
1393 		break;
1394 	}
1395 out:
1396 	return notifier_from_errno(ret);
1397 }
1398 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1399 
1400 /*
1401  * swap the static kmem_list3 with kmalloced memory
1402  */
1403 static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1404 				int nodeid)
1405 {
1406 	struct kmem_list3 *ptr;
1407 
1408 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
1409 	BUG_ON(!ptr);
1410 
1411 	memcpy(ptr, list, sizeof(struct kmem_list3));
1412 	/*
1413 	 * Do not assume that spinlocks can be initialized via memcpy:
1414 	 */
1415 	spin_lock_init(&ptr->list_lock);
1416 
1417 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1418 	cachep->nodelists[nodeid] = ptr;
1419 }
1420 
1421 /*
1422  * For setting up all the kmem_list3s for cache whose buffer_size is same as
1423  * size of kmem_list3.
1424  */
1425 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1426 {
1427 	int node;
1428 
1429 	for_each_online_node(node) {
1430 		cachep->nodelists[node] = &initkmem_list3[index + node];
1431 		cachep->nodelists[node]->next_reap = jiffies +
1432 		    REAPTIMEOUT_LIST3 +
1433 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1434 	}
1435 }
1436 
1437 /*
1438  * Initialisation.  Called after the page allocator have been initialised and
1439  * before smp_init().
1440  */
1441 void __init kmem_cache_init(void)
1442 {
1443 	size_t left_over;
1444 	struct cache_sizes *sizes;
1445 	struct cache_names *names;
1446 	int i;
1447 	int order;
1448 	int node;
1449 
1450 	if (num_possible_nodes() == 1)
1451 		use_alien_caches = 0;
1452 
1453 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1454 		kmem_list3_init(&initkmem_list3[i]);
1455 		if (i < MAX_NUMNODES)
1456 			cache_cache.nodelists[i] = NULL;
1457 	}
1458 	set_up_list3s(&cache_cache, CACHE_CACHE);
1459 
1460 	/*
1461 	 * Fragmentation resistance on low memory - only use bigger
1462 	 * page orders on machines with more than 32MB of memory.
1463 	 */
1464 	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
1465 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1466 
1467 	/* Bootstrap is tricky, because several objects are allocated
1468 	 * from caches that do not exist yet:
1469 	 * 1) initialize the cache_cache cache: it contains the struct
1470 	 *    kmem_cache structures of all caches, except cache_cache itself:
1471 	 *    cache_cache is statically allocated.
1472 	 *    Initially an __init data area is used for the head array and the
1473 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1474 	 *    array at the end of the bootstrap.
1475 	 * 2) Create the first kmalloc cache.
1476 	 *    The struct kmem_cache for the new cache is allocated normally.
1477 	 *    An __init data area is used for the head array.
1478 	 * 3) Create the remaining kmalloc caches, with minimally sized
1479 	 *    head arrays.
1480 	 * 4) Replace the __init data head arrays for cache_cache and the first
1481 	 *    kmalloc cache with kmalloc allocated arrays.
1482 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1483 	 *    the other cache's with kmalloc allocated memory.
1484 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1485 	 */
1486 
1487 	node = numa_mem_id();
1488 
1489 	/* 1) create the cache_cache */
1490 	INIT_LIST_HEAD(&cache_chain);
1491 	list_add(&cache_cache.next, &cache_chain);
1492 	cache_cache.colour_off = cache_line_size();
1493 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1494 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1495 
1496 	/*
1497 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1498 	 */
1499 	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1500 				  nr_node_ids * sizeof(struct kmem_list3 *);
1501 #if DEBUG
1502 	cache_cache.obj_size = cache_cache.buffer_size;
1503 #endif
1504 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1505 					cache_line_size());
1506 	cache_cache.reciprocal_buffer_size =
1507 		reciprocal_value(cache_cache.buffer_size);
1508 
1509 	for (order = 0; order < MAX_ORDER; order++) {
1510 		cache_estimate(order, cache_cache.buffer_size,
1511 			cache_line_size(), 0, &left_over, &cache_cache.num);
1512 		if (cache_cache.num)
1513 			break;
1514 	}
1515 	BUG_ON(!cache_cache.num);
1516 	cache_cache.gfporder = order;
1517 	cache_cache.colour = left_over / cache_cache.colour_off;
1518 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1519 				      sizeof(struct slab), cache_line_size());
1520 
1521 	/* 2+3) create the kmalloc caches */
1522 	sizes = malloc_sizes;
1523 	names = cache_names;
1524 
1525 	/*
1526 	 * Initialize the caches that provide memory for the array cache and the
1527 	 * kmem_list3 structures first.  Without this, further allocations will
1528 	 * bug.
1529 	 */
1530 
1531 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1532 					sizes[INDEX_AC].cs_size,
1533 					ARCH_KMALLOC_MINALIGN,
1534 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1535 					NULL);
1536 
1537 	if (INDEX_AC != INDEX_L3) {
1538 		sizes[INDEX_L3].cs_cachep =
1539 			kmem_cache_create(names[INDEX_L3].name,
1540 				sizes[INDEX_L3].cs_size,
1541 				ARCH_KMALLOC_MINALIGN,
1542 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1543 				NULL);
1544 	}
1545 
1546 	slab_early_init = 0;
1547 
1548 	while (sizes->cs_size != ULONG_MAX) {
1549 		/*
1550 		 * For performance, all the general caches are L1 aligned.
1551 		 * This should be particularly beneficial on SMP boxes, as it
1552 		 * eliminates "false sharing".
1553 		 * Note for systems short on memory removing the alignment will
1554 		 * allow tighter packing of the smaller caches.
1555 		 */
1556 		if (!sizes->cs_cachep) {
1557 			sizes->cs_cachep = kmem_cache_create(names->name,
1558 					sizes->cs_size,
1559 					ARCH_KMALLOC_MINALIGN,
1560 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1561 					NULL);
1562 		}
1563 #ifdef CONFIG_ZONE_DMA
1564 		sizes->cs_dmacachep = kmem_cache_create(
1565 					names->name_dma,
1566 					sizes->cs_size,
1567 					ARCH_KMALLOC_MINALIGN,
1568 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1569 						SLAB_PANIC,
1570 					NULL);
1571 #endif
1572 		sizes++;
1573 		names++;
1574 	}
1575 	/* 4) Replace the bootstrap head arrays */
1576 	{
1577 		struct array_cache *ptr;
1578 
1579 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1580 
1581 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1582 		memcpy(ptr, cpu_cache_get(&cache_cache),
1583 		       sizeof(struct arraycache_init));
1584 		/*
1585 		 * Do not assume that spinlocks can be initialized via memcpy:
1586 		 */
1587 		spin_lock_init(&ptr->lock);
1588 
1589 		cache_cache.array[smp_processor_id()] = ptr;
1590 
1591 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1592 
1593 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1594 		       != &initarray_generic.cache);
1595 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1596 		       sizeof(struct arraycache_init));
1597 		/*
1598 		 * Do not assume that spinlocks can be initialized via memcpy:
1599 		 */
1600 		spin_lock_init(&ptr->lock);
1601 
1602 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1603 		    ptr;
1604 	}
1605 	/* 5) Replace the bootstrap kmem_list3's */
1606 	{
1607 		int nid;
1608 
1609 		for_each_online_node(nid) {
1610 			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1611 
1612 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1613 				  &initkmem_list3[SIZE_AC + nid], nid);
1614 
1615 			if (INDEX_AC != INDEX_L3) {
1616 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1617 					  &initkmem_list3[SIZE_L3 + nid], nid);
1618 			}
1619 		}
1620 	}
1621 
1622 	g_cpucache_up = EARLY;
1623 }
1624 
1625 void __init kmem_cache_init_late(void)
1626 {
1627 	struct kmem_cache *cachep;
1628 
1629 	/* 6) resize the head arrays to their final sizes */
1630 	mutex_lock(&cache_chain_mutex);
1631 	list_for_each_entry(cachep, &cache_chain, next)
1632 		if (enable_cpucache(cachep, GFP_NOWAIT))
1633 			BUG();
1634 	mutex_unlock(&cache_chain_mutex);
1635 
1636 	/* Done! */
1637 	g_cpucache_up = FULL;
1638 
1639 	/* Annotate slab for lockdep -- annotate the malloc caches */
1640 	init_lock_keys();
1641 
1642 	/*
1643 	 * Register a cpu startup notifier callback that initializes
1644 	 * cpu_cache_get for all new cpus
1645 	 */
1646 	register_cpu_notifier(&cpucache_notifier);
1647 
1648 #ifdef CONFIG_NUMA
1649 	/*
1650 	 * Register a memory hotplug callback that initializes and frees
1651 	 * nodelists.
1652 	 */
1653 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1654 #endif
1655 
1656 	/*
1657 	 * The reap timers are started later, with a module init call: That part
1658 	 * of the kernel is not yet operational.
1659 	 */
1660 }
1661 
1662 static int __init cpucache_init(void)
1663 {
1664 	int cpu;
1665 
1666 	/*
1667 	 * Register the timers that return unneeded pages to the page allocator
1668 	 */
1669 	for_each_online_cpu(cpu)
1670 		start_cpu_timer(cpu);
1671 	return 0;
1672 }
1673 __initcall(cpucache_init);
1674 
1675 /*
1676  * Interface to system's page allocator. No need to hold the cache-lock.
1677  *
1678  * If we requested dmaable memory, we will get it. Even if we
1679  * did not request dmaable memory, we might get it, but that
1680  * would be relatively rare and ignorable.
1681  */
1682 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1683 {
1684 	struct page *page;
1685 	int nr_pages;
1686 	int i;
1687 
1688 #ifndef CONFIG_MMU
1689 	/*
1690 	 * Nommu uses slab's for process anonymous memory allocations, and thus
1691 	 * requires __GFP_COMP to properly refcount higher order allocations
1692 	 */
1693 	flags |= __GFP_COMP;
1694 #endif
1695 
1696 	flags |= cachep->gfpflags;
1697 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1698 		flags |= __GFP_RECLAIMABLE;
1699 
1700 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1701 	if (!page)
1702 		return NULL;
1703 
1704 	nr_pages = (1 << cachep->gfporder);
1705 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1706 		add_zone_page_state(page_zone(page),
1707 			NR_SLAB_RECLAIMABLE, nr_pages);
1708 	else
1709 		add_zone_page_state(page_zone(page),
1710 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1711 	for (i = 0; i < nr_pages; i++)
1712 		__SetPageSlab(page + i);
1713 
1714 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1715 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1716 
1717 		if (cachep->ctor)
1718 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1719 		else
1720 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1721 	}
1722 
1723 	return page_address(page);
1724 }
1725 
1726 /*
1727  * Interface to system's page release.
1728  */
1729 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1730 {
1731 	unsigned long i = (1 << cachep->gfporder);
1732 	struct page *page = virt_to_page(addr);
1733 	const unsigned long nr_freed = i;
1734 
1735 	kmemcheck_free_shadow(page, cachep->gfporder);
1736 
1737 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1738 		sub_zone_page_state(page_zone(page),
1739 				NR_SLAB_RECLAIMABLE, nr_freed);
1740 	else
1741 		sub_zone_page_state(page_zone(page),
1742 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1743 	while (i--) {
1744 		BUG_ON(!PageSlab(page));
1745 		__ClearPageSlab(page);
1746 		page++;
1747 	}
1748 	if (current->reclaim_state)
1749 		current->reclaim_state->reclaimed_slab += nr_freed;
1750 	free_pages((unsigned long)addr, cachep->gfporder);
1751 }
1752 
1753 static void kmem_rcu_free(struct rcu_head *head)
1754 {
1755 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1756 	struct kmem_cache *cachep = slab_rcu->cachep;
1757 
1758 	kmem_freepages(cachep, slab_rcu->addr);
1759 	if (OFF_SLAB(cachep))
1760 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1761 }
1762 
1763 #if DEBUG
1764 
1765 #ifdef CONFIG_DEBUG_PAGEALLOC
1766 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1767 			    unsigned long caller)
1768 {
1769 	int size = obj_size(cachep);
1770 
1771 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1772 
1773 	if (size < 5 * sizeof(unsigned long))
1774 		return;
1775 
1776 	*addr++ = 0x12345678;
1777 	*addr++ = caller;
1778 	*addr++ = smp_processor_id();
1779 	size -= 3 * sizeof(unsigned long);
1780 	{
1781 		unsigned long *sptr = &caller;
1782 		unsigned long svalue;
1783 
1784 		while (!kstack_end(sptr)) {
1785 			svalue = *sptr++;
1786 			if (kernel_text_address(svalue)) {
1787 				*addr++ = svalue;
1788 				size -= sizeof(unsigned long);
1789 				if (size <= sizeof(unsigned long))
1790 					break;
1791 			}
1792 		}
1793 
1794 	}
1795 	*addr++ = 0x87654321;
1796 }
1797 #endif
1798 
1799 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1800 {
1801 	int size = obj_size(cachep);
1802 	addr = &((char *)addr)[obj_offset(cachep)];
1803 
1804 	memset(addr, val, size);
1805 	*(unsigned char *)(addr + size - 1) = POISON_END;
1806 }
1807 
1808 static void dump_line(char *data, int offset, int limit)
1809 {
1810 	int i;
1811 	unsigned char error = 0;
1812 	int bad_count = 0;
1813 
1814 	printk(KERN_ERR "%03x:", offset);
1815 	for (i = 0; i < limit; i++) {
1816 		if (data[offset + i] != POISON_FREE) {
1817 			error = data[offset + i];
1818 			bad_count++;
1819 		}
1820 		printk(" %02x", (unsigned char)data[offset + i]);
1821 	}
1822 	printk("\n");
1823 
1824 	if (bad_count == 1) {
1825 		error ^= POISON_FREE;
1826 		if (!(error & (error - 1))) {
1827 			printk(KERN_ERR "Single bit error detected. Probably "
1828 					"bad RAM.\n");
1829 #ifdef CONFIG_X86
1830 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1831 					"test tool.\n");
1832 #else
1833 			printk(KERN_ERR "Run a memory test tool.\n");
1834 #endif
1835 		}
1836 	}
1837 }
1838 #endif
1839 
1840 #if DEBUG
1841 
1842 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1843 {
1844 	int i, size;
1845 	char *realobj;
1846 
1847 	if (cachep->flags & SLAB_RED_ZONE) {
1848 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1849 			*dbg_redzone1(cachep, objp),
1850 			*dbg_redzone2(cachep, objp));
1851 	}
1852 
1853 	if (cachep->flags & SLAB_STORE_USER) {
1854 		printk(KERN_ERR "Last user: [<%p>]",
1855 			*dbg_userword(cachep, objp));
1856 		print_symbol("(%s)",
1857 				(unsigned long)*dbg_userword(cachep, objp));
1858 		printk("\n");
1859 	}
1860 	realobj = (char *)objp + obj_offset(cachep);
1861 	size = obj_size(cachep);
1862 	for (i = 0; i < size && lines; i += 16, lines--) {
1863 		int limit;
1864 		limit = 16;
1865 		if (i + limit > size)
1866 			limit = size - i;
1867 		dump_line(realobj, i, limit);
1868 	}
1869 }
1870 
1871 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1872 {
1873 	char *realobj;
1874 	int size, i;
1875 	int lines = 0;
1876 
1877 	realobj = (char *)objp + obj_offset(cachep);
1878 	size = obj_size(cachep);
1879 
1880 	for (i = 0; i < size; i++) {
1881 		char exp = POISON_FREE;
1882 		if (i == size - 1)
1883 			exp = POISON_END;
1884 		if (realobj[i] != exp) {
1885 			int limit;
1886 			/* Mismatch ! */
1887 			/* Print header */
1888 			if (lines == 0) {
1889 				printk(KERN_ERR
1890 					"Slab corruption: %s start=%p, len=%d\n",
1891 					cachep->name, realobj, size);
1892 				print_objinfo(cachep, objp, 0);
1893 			}
1894 			/* Hexdump the affected line */
1895 			i = (i / 16) * 16;
1896 			limit = 16;
1897 			if (i + limit > size)
1898 				limit = size - i;
1899 			dump_line(realobj, i, limit);
1900 			i += 16;
1901 			lines++;
1902 			/* Limit to 5 lines */
1903 			if (lines > 5)
1904 				break;
1905 		}
1906 	}
1907 	if (lines != 0) {
1908 		/* Print some data about the neighboring objects, if they
1909 		 * exist:
1910 		 */
1911 		struct slab *slabp = virt_to_slab(objp);
1912 		unsigned int objnr;
1913 
1914 		objnr = obj_to_index(cachep, slabp, objp);
1915 		if (objnr) {
1916 			objp = index_to_obj(cachep, slabp, objnr - 1);
1917 			realobj = (char *)objp + obj_offset(cachep);
1918 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1919 			       realobj, size);
1920 			print_objinfo(cachep, objp, 2);
1921 		}
1922 		if (objnr + 1 < cachep->num) {
1923 			objp = index_to_obj(cachep, slabp, objnr + 1);
1924 			realobj = (char *)objp + obj_offset(cachep);
1925 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1926 			       realobj, size);
1927 			print_objinfo(cachep, objp, 2);
1928 		}
1929 	}
1930 }
1931 #endif
1932 
1933 #if DEBUG
1934 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1935 {
1936 	int i;
1937 	for (i = 0; i < cachep->num; i++) {
1938 		void *objp = index_to_obj(cachep, slabp, i);
1939 
1940 		if (cachep->flags & SLAB_POISON) {
1941 #ifdef CONFIG_DEBUG_PAGEALLOC
1942 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1943 					OFF_SLAB(cachep))
1944 				kernel_map_pages(virt_to_page(objp),
1945 					cachep->buffer_size / PAGE_SIZE, 1);
1946 			else
1947 				check_poison_obj(cachep, objp);
1948 #else
1949 			check_poison_obj(cachep, objp);
1950 #endif
1951 		}
1952 		if (cachep->flags & SLAB_RED_ZONE) {
1953 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1954 				slab_error(cachep, "start of a freed object "
1955 					   "was overwritten");
1956 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1957 				slab_error(cachep, "end of a freed object "
1958 					   "was overwritten");
1959 		}
1960 	}
1961 }
1962 #else
1963 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1964 {
1965 }
1966 #endif
1967 
1968 /**
1969  * slab_destroy - destroy and release all objects in a slab
1970  * @cachep: cache pointer being destroyed
1971  * @slabp: slab pointer being destroyed
1972  *
1973  * Destroy all the objs in a slab, and release the mem back to the system.
1974  * Before calling the slab must have been unlinked from the cache.  The
1975  * cache-lock is not held/needed.
1976  */
1977 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1978 {
1979 	void *addr = slabp->s_mem - slabp->colouroff;
1980 
1981 	slab_destroy_debugcheck(cachep, slabp);
1982 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1983 		struct slab_rcu *slab_rcu;
1984 
1985 		slab_rcu = (struct slab_rcu *)slabp;
1986 		slab_rcu->cachep = cachep;
1987 		slab_rcu->addr = addr;
1988 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1989 	} else {
1990 		kmem_freepages(cachep, addr);
1991 		if (OFF_SLAB(cachep))
1992 			kmem_cache_free(cachep->slabp_cache, slabp);
1993 	}
1994 }
1995 
1996 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1997 {
1998 	int i;
1999 	struct kmem_list3 *l3;
2000 
2001 	for_each_online_cpu(i)
2002 	    kfree(cachep->array[i]);
2003 
2004 	/* NUMA: free the list3 structures */
2005 	for_each_online_node(i) {
2006 		l3 = cachep->nodelists[i];
2007 		if (l3) {
2008 			kfree(l3->shared);
2009 			free_alien_cache(l3->alien);
2010 			kfree(l3);
2011 		}
2012 	}
2013 	kmem_cache_free(&cache_cache, cachep);
2014 }
2015 
2016 
2017 /**
2018  * calculate_slab_order - calculate size (page order) of slabs
2019  * @cachep: pointer to the cache that is being created
2020  * @size: size of objects to be created in this cache.
2021  * @align: required alignment for the objects.
2022  * @flags: slab allocation flags
2023  *
2024  * Also calculates the number of objects per slab.
2025  *
2026  * This could be made much more intelligent.  For now, try to avoid using
2027  * high order pages for slabs.  When the gfp() functions are more friendly
2028  * towards high-order requests, this should be changed.
2029  */
2030 static size_t calculate_slab_order(struct kmem_cache *cachep,
2031 			size_t size, size_t align, unsigned long flags)
2032 {
2033 	unsigned long offslab_limit;
2034 	size_t left_over = 0;
2035 	int gfporder;
2036 
2037 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2038 		unsigned int num;
2039 		size_t remainder;
2040 
2041 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2042 		if (!num)
2043 			continue;
2044 
2045 		if (flags & CFLGS_OFF_SLAB) {
2046 			/*
2047 			 * Max number of objs-per-slab for caches which
2048 			 * use off-slab slabs. Needed to avoid a possible
2049 			 * looping condition in cache_grow().
2050 			 */
2051 			offslab_limit = size - sizeof(struct slab);
2052 			offslab_limit /= sizeof(kmem_bufctl_t);
2053 
2054  			if (num > offslab_limit)
2055 				break;
2056 		}
2057 
2058 		/* Found something acceptable - save it away */
2059 		cachep->num = num;
2060 		cachep->gfporder = gfporder;
2061 		left_over = remainder;
2062 
2063 		/*
2064 		 * A VFS-reclaimable slab tends to have most allocations
2065 		 * as GFP_NOFS and we really don't want to have to be allocating
2066 		 * higher-order pages when we are unable to shrink dcache.
2067 		 */
2068 		if (flags & SLAB_RECLAIM_ACCOUNT)
2069 			break;
2070 
2071 		/*
2072 		 * Large number of objects is good, but very large slabs are
2073 		 * currently bad for the gfp()s.
2074 		 */
2075 		if (gfporder >= slab_break_gfp_order)
2076 			break;
2077 
2078 		/*
2079 		 * Acceptable internal fragmentation?
2080 		 */
2081 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2082 			break;
2083 	}
2084 	return left_over;
2085 }
2086 
2087 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2088 {
2089 	if (g_cpucache_up == FULL)
2090 		return enable_cpucache(cachep, gfp);
2091 
2092 	if (g_cpucache_up == NONE) {
2093 		/*
2094 		 * Note: the first kmem_cache_create must create the cache
2095 		 * that's used by kmalloc(24), otherwise the creation of
2096 		 * further caches will BUG().
2097 		 */
2098 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2099 
2100 		/*
2101 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2102 		 * the first cache, then we need to set up all its list3s,
2103 		 * otherwise the creation of further caches will BUG().
2104 		 */
2105 		set_up_list3s(cachep, SIZE_AC);
2106 		if (INDEX_AC == INDEX_L3)
2107 			g_cpucache_up = PARTIAL_L3;
2108 		else
2109 			g_cpucache_up = PARTIAL_AC;
2110 	} else {
2111 		cachep->array[smp_processor_id()] =
2112 			kmalloc(sizeof(struct arraycache_init), gfp);
2113 
2114 		if (g_cpucache_up == PARTIAL_AC) {
2115 			set_up_list3s(cachep, SIZE_L3);
2116 			g_cpucache_up = PARTIAL_L3;
2117 		} else {
2118 			int node;
2119 			for_each_online_node(node) {
2120 				cachep->nodelists[node] =
2121 				    kmalloc_node(sizeof(struct kmem_list3),
2122 						gfp, node);
2123 				BUG_ON(!cachep->nodelists[node]);
2124 				kmem_list3_init(cachep->nodelists[node]);
2125 			}
2126 		}
2127 	}
2128 	cachep->nodelists[numa_mem_id()]->next_reap =
2129 			jiffies + REAPTIMEOUT_LIST3 +
2130 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2131 
2132 	cpu_cache_get(cachep)->avail = 0;
2133 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2134 	cpu_cache_get(cachep)->batchcount = 1;
2135 	cpu_cache_get(cachep)->touched = 0;
2136 	cachep->batchcount = 1;
2137 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2138 	return 0;
2139 }
2140 
2141 /**
2142  * kmem_cache_create - Create a cache.
2143  * @name: A string which is used in /proc/slabinfo to identify this cache.
2144  * @size: The size of objects to be created in this cache.
2145  * @align: The required alignment for the objects.
2146  * @flags: SLAB flags
2147  * @ctor: A constructor for the objects.
2148  *
2149  * Returns a ptr to the cache on success, NULL on failure.
2150  * Cannot be called within a int, but can be interrupted.
2151  * The @ctor is run when new pages are allocated by the cache.
2152  *
2153  * @name must be valid until the cache is destroyed. This implies that
2154  * the module calling this has to destroy the cache before getting unloaded.
2155  *
2156  * The flags are
2157  *
2158  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2159  * to catch references to uninitialised memory.
2160  *
2161  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2162  * for buffer overruns.
2163  *
2164  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2165  * cacheline.  This can be beneficial if you're counting cycles as closely
2166  * as davem.
2167  */
2168 struct kmem_cache *
2169 kmem_cache_create (const char *name, size_t size, size_t align,
2170 	unsigned long flags, void (*ctor)(void *))
2171 {
2172 	size_t left_over, slab_size, ralign;
2173 	struct kmem_cache *cachep = NULL, *pc;
2174 	gfp_t gfp;
2175 
2176 	/*
2177 	 * Sanity checks... these are all serious usage bugs.
2178 	 */
2179 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2180 	    size > KMALLOC_MAX_SIZE) {
2181 		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2182 				name);
2183 		BUG();
2184 	}
2185 
2186 	/*
2187 	 * We use cache_chain_mutex to ensure a consistent view of
2188 	 * cpu_online_mask as well.  Please see cpuup_callback
2189 	 */
2190 	if (slab_is_available()) {
2191 		get_online_cpus();
2192 		mutex_lock(&cache_chain_mutex);
2193 	}
2194 
2195 	list_for_each_entry(pc, &cache_chain, next) {
2196 		char tmp;
2197 		int res;
2198 
2199 		/*
2200 		 * This happens when the module gets unloaded and doesn't
2201 		 * destroy its slab cache and no-one else reuses the vmalloc
2202 		 * area of the module.  Print a warning.
2203 		 */
2204 		res = probe_kernel_address(pc->name, tmp);
2205 		if (res) {
2206 			printk(KERN_ERR
2207 			       "SLAB: cache with size %d has lost its name\n",
2208 			       pc->buffer_size);
2209 			continue;
2210 		}
2211 
2212 		if (!strcmp(pc->name, name)) {
2213 			printk(KERN_ERR
2214 			       "kmem_cache_create: duplicate cache %s\n", name);
2215 			dump_stack();
2216 			goto oops;
2217 		}
2218 	}
2219 
2220 #if DEBUG
2221 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2222 #if FORCED_DEBUG
2223 	/*
2224 	 * Enable redzoning and last user accounting, except for caches with
2225 	 * large objects, if the increased size would increase the object size
2226 	 * above the next power of two: caches with object sizes just above a
2227 	 * power of two have a significant amount of internal fragmentation.
2228 	 */
2229 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2230 						2 * sizeof(unsigned long long)))
2231 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2232 	if (!(flags & SLAB_DESTROY_BY_RCU))
2233 		flags |= SLAB_POISON;
2234 #endif
2235 	if (flags & SLAB_DESTROY_BY_RCU)
2236 		BUG_ON(flags & SLAB_POISON);
2237 #endif
2238 	/*
2239 	 * Always checks flags, a caller might be expecting debug support which
2240 	 * isn't available.
2241 	 */
2242 	BUG_ON(flags & ~CREATE_MASK);
2243 
2244 	/*
2245 	 * Check that size is in terms of words.  This is needed to avoid
2246 	 * unaligned accesses for some archs when redzoning is used, and makes
2247 	 * sure any on-slab bufctl's are also correctly aligned.
2248 	 */
2249 	if (size & (BYTES_PER_WORD - 1)) {
2250 		size += (BYTES_PER_WORD - 1);
2251 		size &= ~(BYTES_PER_WORD - 1);
2252 	}
2253 
2254 	/* calculate the final buffer alignment: */
2255 
2256 	/* 1) arch recommendation: can be overridden for debug */
2257 	if (flags & SLAB_HWCACHE_ALIGN) {
2258 		/*
2259 		 * Default alignment: as specified by the arch code.  Except if
2260 		 * an object is really small, then squeeze multiple objects into
2261 		 * one cacheline.
2262 		 */
2263 		ralign = cache_line_size();
2264 		while (size <= ralign / 2)
2265 			ralign /= 2;
2266 	} else {
2267 		ralign = BYTES_PER_WORD;
2268 	}
2269 
2270 	/*
2271 	 * Redzoning and user store require word alignment or possibly larger.
2272 	 * Note this will be overridden by architecture or caller mandated
2273 	 * alignment if either is greater than BYTES_PER_WORD.
2274 	 */
2275 	if (flags & SLAB_STORE_USER)
2276 		ralign = BYTES_PER_WORD;
2277 
2278 	if (flags & SLAB_RED_ZONE) {
2279 		ralign = REDZONE_ALIGN;
2280 		/* If redzoning, ensure that the second redzone is suitably
2281 		 * aligned, by adjusting the object size accordingly. */
2282 		size += REDZONE_ALIGN - 1;
2283 		size &= ~(REDZONE_ALIGN - 1);
2284 	}
2285 
2286 	/* 2) arch mandated alignment */
2287 	if (ralign < ARCH_SLAB_MINALIGN) {
2288 		ralign = ARCH_SLAB_MINALIGN;
2289 	}
2290 	/* 3) caller mandated alignment */
2291 	if (ralign < align) {
2292 		ralign = align;
2293 	}
2294 	/* disable debug if necessary */
2295 	if (ralign > __alignof__(unsigned long long))
2296 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2297 	/*
2298 	 * 4) Store it.
2299 	 */
2300 	align = ralign;
2301 
2302 	if (slab_is_available())
2303 		gfp = GFP_KERNEL;
2304 	else
2305 		gfp = GFP_NOWAIT;
2306 
2307 	/* Get cache's description obj. */
2308 	cachep = kmem_cache_zalloc(&cache_cache, gfp);
2309 	if (!cachep)
2310 		goto oops;
2311 
2312 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
2313 #if DEBUG
2314 	cachep->obj_size = size;
2315 
2316 	/*
2317 	 * Both debugging options require word-alignment which is calculated
2318 	 * into align above.
2319 	 */
2320 	if (flags & SLAB_RED_ZONE) {
2321 		/* add space for red zone words */
2322 		cachep->obj_offset += sizeof(unsigned long long);
2323 		size += 2 * sizeof(unsigned long long);
2324 	}
2325 	if (flags & SLAB_STORE_USER) {
2326 		/* user store requires one word storage behind the end of
2327 		 * the real object. But if the second red zone needs to be
2328 		 * aligned to 64 bits, we must allow that much space.
2329 		 */
2330 		if (flags & SLAB_RED_ZONE)
2331 			size += REDZONE_ALIGN;
2332 		else
2333 			size += BYTES_PER_WORD;
2334 	}
2335 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2336 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2337 	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
2338 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
2339 		size = PAGE_SIZE;
2340 	}
2341 #endif
2342 #endif
2343 
2344 	/*
2345 	 * Determine if the slab management is 'on' or 'off' slab.
2346 	 * (bootstrapping cannot cope with offslab caches so don't do
2347 	 * it too early on. Always use on-slab management when
2348 	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2349 	 */
2350 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2351 	    !(flags & SLAB_NOLEAKTRACE))
2352 		/*
2353 		 * Size is large, assume best to place the slab management obj
2354 		 * off-slab (should allow better packing of objs).
2355 		 */
2356 		flags |= CFLGS_OFF_SLAB;
2357 
2358 	size = ALIGN(size, align);
2359 
2360 	left_over = calculate_slab_order(cachep, size, align, flags);
2361 
2362 	if (!cachep->num) {
2363 		printk(KERN_ERR
2364 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2365 		kmem_cache_free(&cache_cache, cachep);
2366 		cachep = NULL;
2367 		goto oops;
2368 	}
2369 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2370 			  + sizeof(struct slab), align);
2371 
2372 	/*
2373 	 * If the slab has been placed off-slab, and we have enough space then
2374 	 * move it on-slab. This is at the expense of any extra colouring.
2375 	 */
2376 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2377 		flags &= ~CFLGS_OFF_SLAB;
2378 		left_over -= slab_size;
2379 	}
2380 
2381 	if (flags & CFLGS_OFF_SLAB) {
2382 		/* really off slab. No need for manual alignment */
2383 		slab_size =
2384 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2385 
2386 #ifdef CONFIG_PAGE_POISONING
2387 		/* If we're going to use the generic kernel_map_pages()
2388 		 * poisoning, then it's going to smash the contents of
2389 		 * the redzone and userword anyhow, so switch them off.
2390 		 */
2391 		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2392 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2393 #endif
2394 	}
2395 
2396 	cachep->colour_off = cache_line_size();
2397 	/* Offset must be a multiple of the alignment. */
2398 	if (cachep->colour_off < align)
2399 		cachep->colour_off = align;
2400 	cachep->colour = left_over / cachep->colour_off;
2401 	cachep->slab_size = slab_size;
2402 	cachep->flags = flags;
2403 	cachep->gfpflags = 0;
2404 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2405 		cachep->gfpflags |= GFP_DMA;
2406 	cachep->buffer_size = size;
2407 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2408 
2409 	if (flags & CFLGS_OFF_SLAB) {
2410 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2411 		/*
2412 		 * This is a possibility for one of the malloc_sizes caches.
2413 		 * But since we go off slab only for object size greater than
2414 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2415 		 * this should not happen at all.
2416 		 * But leave a BUG_ON for some lucky dude.
2417 		 */
2418 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2419 	}
2420 	cachep->ctor = ctor;
2421 	cachep->name = name;
2422 
2423 	if (setup_cpu_cache(cachep, gfp)) {
2424 		__kmem_cache_destroy(cachep);
2425 		cachep = NULL;
2426 		goto oops;
2427 	}
2428 
2429 	/* cache setup completed, link it into the list */
2430 	list_add(&cachep->next, &cache_chain);
2431 oops:
2432 	if (!cachep && (flags & SLAB_PANIC))
2433 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2434 		      name);
2435 	if (slab_is_available()) {
2436 		mutex_unlock(&cache_chain_mutex);
2437 		put_online_cpus();
2438 	}
2439 	return cachep;
2440 }
2441 EXPORT_SYMBOL(kmem_cache_create);
2442 
2443 #if DEBUG
2444 static void check_irq_off(void)
2445 {
2446 	BUG_ON(!irqs_disabled());
2447 }
2448 
2449 static void check_irq_on(void)
2450 {
2451 	BUG_ON(irqs_disabled());
2452 }
2453 
2454 static void check_spinlock_acquired(struct kmem_cache *cachep)
2455 {
2456 #ifdef CONFIG_SMP
2457 	check_irq_off();
2458 	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2459 #endif
2460 }
2461 
2462 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2463 {
2464 #ifdef CONFIG_SMP
2465 	check_irq_off();
2466 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2467 #endif
2468 }
2469 
2470 #else
2471 #define check_irq_off()	do { } while(0)
2472 #define check_irq_on()	do { } while(0)
2473 #define check_spinlock_acquired(x) do { } while(0)
2474 #define check_spinlock_acquired_node(x, y) do { } while(0)
2475 #endif
2476 
2477 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2478 			struct array_cache *ac,
2479 			int force, int node);
2480 
2481 static void do_drain(void *arg)
2482 {
2483 	struct kmem_cache *cachep = arg;
2484 	struct array_cache *ac;
2485 	int node = numa_mem_id();
2486 
2487 	check_irq_off();
2488 	ac = cpu_cache_get(cachep);
2489 	spin_lock(&cachep->nodelists[node]->list_lock);
2490 	free_block(cachep, ac->entry, ac->avail, node);
2491 	spin_unlock(&cachep->nodelists[node]->list_lock);
2492 	ac->avail = 0;
2493 }
2494 
2495 static void drain_cpu_caches(struct kmem_cache *cachep)
2496 {
2497 	struct kmem_list3 *l3;
2498 	int node;
2499 
2500 	on_each_cpu(do_drain, cachep, 1);
2501 	check_irq_on();
2502 	for_each_online_node(node) {
2503 		l3 = cachep->nodelists[node];
2504 		if (l3 && l3->alien)
2505 			drain_alien_cache(cachep, l3->alien);
2506 	}
2507 
2508 	for_each_online_node(node) {
2509 		l3 = cachep->nodelists[node];
2510 		if (l3)
2511 			drain_array(cachep, l3, l3->shared, 1, node);
2512 	}
2513 }
2514 
2515 /*
2516  * Remove slabs from the list of free slabs.
2517  * Specify the number of slabs to drain in tofree.
2518  *
2519  * Returns the actual number of slabs released.
2520  */
2521 static int drain_freelist(struct kmem_cache *cache,
2522 			struct kmem_list3 *l3, int tofree)
2523 {
2524 	struct list_head *p;
2525 	int nr_freed;
2526 	struct slab *slabp;
2527 
2528 	nr_freed = 0;
2529 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2530 
2531 		spin_lock_irq(&l3->list_lock);
2532 		p = l3->slabs_free.prev;
2533 		if (p == &l3->slabs_free) {
2534 			spin_unlock_irq(&l3->list_lock);
2535 			goto out;
2536 		}
2537 
2538 		slabp = list_entry(p, struct slab, list);
2539 #if DEBUG
2540 		BUG_ON(slabp->inuse);
2541 #endif
2542 		list_del(&slabp->list);
2543 		/*
2544 		 * Safe to drop the lock. The slab is no longer linked
2545 		 * to the cache.
2546 		 */
2547 		l3->free_objects -= cache->num;
2548 		spin_unlock_irq(&l3->list_lock);
2549 		slab_destroy(cache, slabp);
2550 		nr_freed++;
2551 	}
2552 out:
2553 	return nr_freed;
2554 }
2555 
2556 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2557 static int __cache_shrink(struct kmem_cache *cachep)
2558 {
2559 	int ret = 0, i = 0;
2560 	struct kmem_list3 *l3;
2561 
2562 	drain_cpu_caches(cachep);
2563 
2564 	check_irq_on();
2565 	for_each_online_node(i) {
2566 		l3 = cachep->nodelists[i];
2567 		if (!l3)
2568 			continue;
2569 
2570 		drain_freelist(cachep, l3, l3->free_objects);
2571 
2572 		ret += !list_empty(&l3->slabs_full) ||
2573 			!list_empty(&l3->slabs_partial);
2574 	}
2575 	return (ret ? 1 : 0);
2576 }
2577 
2578 /**
2579  * kmem_cache_shrink - Shrink a cache.
2580  * @cachep: The cache to shrink.
2581  *
2582  * Releases as many slabs as possible for a cache.
2583  * To help debugging, a zero exit status indicates all slabs were released.
2584  */
2585 int kmem_cache_shrink(struct kmem_cache *cachep)
2586 {
2587 	int ret;
2588 	BUG_ON(!cachep || in_interrupt());
2589 
2590 	get_online_cpus();
2591 	mutex_lock(&cache_chain_mutex);
2592 	ret = __cache_shrink(cachep);
2593 	mutex_unlock(&cache_chain_mutex);
2594 	put_online_cpus();
2595 	return ret;
2596 }
2597 EXPORT_SYMBOL(kmem_cache_shrink);
2598 
2599 /**
2600  * kmem_cache_destroy - delete a cache
2601  * @cachep: the cache to destroy
2602  *
2603  * Remove a &struct kmem_cache object from the slab cache.
2604  *
2605  * It is expected this function will be called by a module when it is
2606  * unloaded.  This will remove the cache completely, and avoid a duplicate
2607  * cache being allocated each time a module is loaded and unloaded, if the
2608  * module doesn't have persistent in-kernel storage across loads and unloads.
2609  *
2610  * The cache must be empty before calling this function.
2611  *
2612  * The caller must guarantee that no one will allocate memory from the cache
2613  * during the kmem_cache_destroy().
2614  */
2615 void kmem_cache_destroy(struct kmem_cache *cachep)
2616 {
2617 	BUG_ON(!cachep || in_interrupt());
2618 
2619 	/* Find the cache in the chain of caches. */
2620 	get_online_cpus();
2621 	mutex_lock(&cache_chain_mutex);
2622 	/*
2623 	 * the chain is never empty, cache_cache is never destroyed
2624 	 */
2625 	list_del(&cachep->next);
2626 	if (__cache_shrink(cachep)) {
2627 		slab_error(cachep, "Can't free all objects");
2628 		list_add(&cachep->next, &cache_chain);
2629 		mutex_unlock(&cache_chain_mutex);
2630 		put_online_cpus();
2631 		return;
2632 	}
2633 
2634 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2635 		rcu_barrier();
2636 
2637 	__kmem_cache_destroy(cachep);
2638 	mutex_unlock(&cache_chain_mutex);
2639 	put_online_cpus();
2640 }
2641 EXPORT_SYMBOL(kmem_cache_destroy);
2642 
2643 /*
2644  * Get the memory for a slab management obj.
2645  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2646  * always come from malloc_sizes caches.  The slab descriptor cannot
2647  * come from the same cache which is getting created because,
2648  * when we are searching for an appropriate cache for these
2649  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2650  * If we are creating a malloc_sizes cache here it would not be visible to
2651  * kmem_find_general_cachep till the initialization is complete.
2652  * Hence we cannot have slabp_cache same as the original cache.
2653  */
2654 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2655 				   int colour_off, gfp_t local_flags,
2656 				   int nodeid)
2657 {
2658 	struct slab *slabp;
2659 
2660 	if (OFF_SLAB(cachep)) {
2661 		/* Slab management obj is off-slab. */
2662 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2663 					      local_flags, nodeid);
2664 		/*
2665 		 * If the first object in the slab is leaked (it's allocated
2666 		 * but no one has a reference to it), we want to make sure
2667 		 * kmemleak does not treat the ->s_mem pointer as a reference
2668 		 * to the object. Otherwise we will not report the leak.
2669 		 */
2670 		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2671 				   local_flags);
2672 		if (!slabp)
2673 			return NULL;
2674 	} else {
2675 		slabp = objp + colour_off;
2676 		colour_off += cachep->slab_size;
2677 	}
2678 	slabp->inuse = 0;
2679 	slabp->colouroff = colour_off;
2680 	slabp->s_mem = objp + colour_off;
2681 	slabp->nodeid = nodeid;
2682 	slabp->free = 0;
2683 	return slabp;
2684 }
2685 
2686 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2687 {
2688 	return (kmem_bufctl_t *) (slabp + 1);
2689 }
2690 
2691 static void cache_init_objs(struct kmem_cache *cachep,
2692 			    struct slab *slabp)
2693 {
2694 	int i;
2695 
2696 	for (i = 0; i < cachep->num; i++) {
2697 		void *objp = index_to_obj(cachep, slabp, i);
2698 #if DEBUG
2699 		/* need to poison the objs? */
2700 		if (cachep->flags & SLAB_POISON)
2701 			poison_obj(cachep, objp, POISON_FREE);
2702 		if (cachep->flags & SLAB_STORE_USER)
2703 			*dbg_userword(cachep, objp) = NULL;
2704 
2705 		if (cachep->flags & SLAB_RED_ZONE) {
2706 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2707 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2708 		}
2709 		/*
2710 		 * Constructors are not allowed to allocate memory from the same
2711 		 * cache which they are a constructor for.  Otherwise, deadlock.
2712 		 * They must also be threaded.
2713 		 */
2714 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2715 			cachep->ctor(objp + obj_offset(cachep));
2716 
2717 		if (cachep->flags & SLAB_RED_ZONE) {
2718 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2719 				slab_error(cachep, "constructor overwrote the"
2720 					   " end of an object");
2721 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2722 				slab_error(cachep, "constructor overwrote the"
2723 					   " start of an object");
2724 		}
2725 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2726 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2727 			kernel_map_pages(virt_to_page(objp),
2728 					 cachep->buffer_size / PAGE_SIZE, 0);
2729 #else
2730 		if (cachep->ctor)
2731 			cachep->ctor(objp);
2732 #endif
2733 		slab_bufctl(slabp)[i] = i + 1;
2734 	}
2735 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2736 }
2737 
2738 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2739 {
2740 	if (CONFIG_ZONE_DMA_FLAG) {
2741 		if (flags & GFP_DMA)
2742 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2743 		else
2744 			BUG_ON(cachep->gfpflags & GFP_DMA);
2745 	}
2746 }
2747 
2748 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2749 				int nodeid)
2750 {
2751 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2752 	kmem_bufctl_t next;
2753 
2754 	slabp->inuse++;
2755 	next = slab_bufctl(slabp)[slabp->free];
2756 #if DEBUG
2757 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2758 	WARN_ON(slabp->nodeid != nodeid);
2759 #endif
2760 	slabp->free = next;
2761 
2762 	return objp;
2763 }
2764 
2765 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2766 				void *objp, int nodeid)
2767 {
2768 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2769 
2770 #if DEBUG
2771 	/* Verify that the slab belongs to the intended node */
2772 	WARN_ON(slabp->nodeid != nodeid);
2773 
2774 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2775 		printk(KERN_ERR "slab: double free detected in cache "
2776 				"'%s', objp %p\n", cachep->name, objp);
2777 		BUG();
2778 	}
2779 #endif
2780 	slab_bufctl(slabp)[objnr] = slabp->free;
2781 	slabp->free = objnr;
2782 	slabp->inuse--;
2783 }
2784 
2785 /*
2786  * Map pages beginning at addr to the given cache and slab. This is required
2787  * for the slab allocator to be able to lookup the cache and slab of a
2788  * virtual address for kfree, ksize, and slab debugging.
2789  */
2790 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2791 			   void *addr)
2792 {
2793 	int nr_pages;
2794 	struct page *page;
2795 
2796 	page = virt_to_page(addr);
2797 
2798 	nr_pages = 1;
2799 	if (likely(!PageCompound(page)))
2800 		nr_pages <<= cache->gfporder;
2801 
2802 	do {
2803 		page_set_cache(page, cache);
2804 		page_set_slab(page, slab);
2805 		page++;
2806 	} while (--nr_pages);
2807 }
2808 
2809 /*
2810  * Grow (by 1) the number of slabs within a cache.  This is called by
2811  * kmem_cache_alloc() when there are no active objs left in a cache.
2812  */
2813 static int cache_grow(struct kmem_cache *cachep,
2814 		gfp_t flags, int nodeid, void *objp)
2815 {
2816 	struct slab *slabp;
2817 	size_t offset;
2818 	gfp_t local_flags;
2819 	struct kmem_list3 *l3;
2820 
2821 	/*
2822 	 * Be lazy and only check for valid flags here,  keeping it out of the
2823 	 * critical path in kmem_cache_alloc().
2824 	 */
2825 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2826 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2827 
2828 	/* Take the l3 list lock to change the colour_next on this node */
2829 	check_irq_off();
2830 	l3 = cachep->nodelists[nodeid];
2831 	spin_lock(&l3->list_lock);
2832 
2833 	/* Get colour for the slab, and cal the next value. */
2834 	offset = l3->colour_next;
2835 	l3->colour_next++;
2836 	if (l3->colour_next >= cachep->colour)
2837 		l3->colour_next = 0;
2838 	spin_unlock(&l3->list_lock);
2839 
2840 	offset *= cachep->colour_off;
2841 
2842 	if (local_flags & __GFP_WAIT)
2843 		local_irq_enable();
2844 
2845 	/*
2846 	 * The test for missing atomic flag is performed here, rather than
2847 	 * the more obvious place, simply to reduce the critical path length
2848 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2849 	 * will eventually be caught here (where it matters).
2850 	 */
2851 	kmem_flagcheck(cachep, flags);
2852 
2853 	/*
2854 	 * Get mem for the objs.  Attempt to allocate a physical page from
2855 	 * 'nodeid'.
2856 	 */
2857 	if (!objp)
2858 		objp = kmem_getpages(cachep, local_flags, nodeid);
2859 	if (!objp)
2860 		goto failed;
2861 
2862 	/* Get slab management. */
2863 	slabp = alloc_slabmgmt(cachep, objp, offset,
2864 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2865 	if (!slabp)
2866 		goto opps1;
2867 
2868 	slab_map_pages(cachep, slabp, objp);
2869 
2870 	cache_init_objs(cachep, slabp);
2871 
2872 	if (local_flags & __GFP_WAIT)
2873 		local_irq_disable();
2874 	check_irq_off();
2875 	spin_lock(&l3->list_lock);
2876 
2877 	/* Make slab active. */
2878 	list_add_tail(&slabp->list, &(l3->slabs_free));
2879 	STATS_INC_GROWN(cachep);
2880 	l3->free_objects += cachep->num;
2881 	spin_unlock(&l3->list_lock);
2882 	return 1;
2883 opps1:
2884 	kmem_freepages(cachep, objp);
2885 failed:
2886 	if (local_flags & __GFP_WAIT)
2887 		local_irq_disable();
2888 	return 0;
2889 }
2890 
2891 #if DEBUG
2892 
2893 /*
2894  * Perform extra freeing checks:
2895  * - detect bad pointers.
2896  * - POISON/RED_ZONE checking
2897  */
2898 static void kfree_debugcheck(const void *objp)
2899 {
2900 	if (!virt_addr_valid(objp)) {
2901 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2902 		       (unsigned long)objp);
2903 		BUG();
2904 	}
2905 }
2906 
2907 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2908 {
2909 	unsigned long long redzone1, redzone2;
2910 
2911 	redzone1 = *dbg_redzone1(cache, obj);
2912 	redzone2 = *dbg_redzone2(cache, obj);
2913 
2914 	/*
2915 	 * Redzone is ok.
2916 	 */
2917 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2918 		return;
2919 
2920 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2921 		slab_error(cache, "double free detected");
2922 	else
2923 		slab_error(cache, "memory outside object was overwritten");
2924 
2925 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2926 			obj, redzone1, redzone2);
2927 }
2928 
2929 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2930 				   void *caller)
2931 {
2932 	struct page *page;
2933 	unsigned int objnr;
2934 	struct slab *slabp;
2935 
2936 	BUG_ON(virt_to_cache(objp) != cachep);
2937 
2938 	objp -= obj_offset(cachep);
2939 	kfree_debugcheck(objp);
2940 	page = virt_to_head_page(objp);
2941 
2942 	slabp = page_get_slab(page);
2943 
2944 	if (cachep->flags & SLAB_RED_ZONE) {
2945 		verify_redzone_free(cachep, objp);
2946 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2947 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2948 	}
2949 	if (cachep->flags & SLAB_STORE_USER)
2950 		*dbg_userword(cachep, objp) = caller;
2951 
2952 	objnr = obj_to_index(cachep, slabp, objp);
2953 
2954 	BUG_ON(objnr >= cachep->num);
2955 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2956 
2957 #ifdef CONFIG_DEBUG_SLAB_LEAK
2958 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2959 #endif
2960 	if (cachep->flags & SLAB_POISON) {
2961 #ifdef CONFIG_DEBUG_PAGEALLOC
2962 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2963 			store_stackinfo(cachep, objp, (unsigned long)caller);
2964 			kernel_map_pages(virt_to_page(objp),
2965 					 cachep->buffer_size / PAGE_SIZE, 0);
2966 		} else {
2967 			poison_obj(cachep, objp, POISON_FREE);
2968 		}
2969 #else
2970 		poison_obj(cachep, objp, POISON_FREE);
2971 #endif
2972 	}
2973 	return objp;
2974 }
2975 
2976 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2977 {
2978 	kmem_bufctl_t i;
2979 	int entries = 0;
2980 
2981 	/* Check slab's freelist to see if this obj is there. */
2982 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2983 		entries++;
2984 		if (entries > cachep->num || i >= cachep->num)
2985 			goto bad;
2986 	}
2987 	if (entries != cachep->num - slabp->inuse) {
2988 bad:
2989 		printk(KERN_ERR "slab: Internal list corruption detected in "
2990 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2991 			cachep->name, cachep->num, slabp, slabp->inuse);
2992 		for (i = 0;
2993 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2994 		     i++) {
2995 			if (i % 16 == 0)
2996 				printk("\n%03x:", i);
2997 			printk(" %02x", ((unsigned char *)slabp)[i]);
2998 		}
2999 		printk("\n");
3000 		BUG();
3001 	}
3002 }
3003 #else
3004 #define kfree_debugcheck(x) do { } while(0)
3005 #define cache_free_debugcheck(x,objp,z) (objp)
3006 #define check_slabp(x,y) do { } while(0)
3007 #endif
3008 
3009 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3010 {
3011 	int batchcount;
3012 	struct kmem_list3 *l3;
3013 	struct array_cache *ac;
3014 	int node;
3015 
3016 retry:
3017 	check_irq_off();
3018 	node = numa_mem_id();
3019 	ac = cpu_cache_get(cachep);
3020 	batchcount = ac->batchcount;
3021 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3022 		/*
3023 		 * If there was little recent activity on this cache, then
3024 		 * perform only a partial refill.  Otherwise we could generate
3025 		 * refill bouncing.
3026 		 */
3027 		batchcount = BATCHREFILL_LIMIT;
3028 	}
3029 	l3 = cachep->nodelists[node];
3030 
3031 	BUG_ON(ac->avail > 0 || !l3);
3032 	spin_lock(&l3->list_lock);
3033 
3034 	/* See if we can refill from the shared array */
3035 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
3036 		l3->shared->touched = 1;
3037 		goto alloc_done;
3038 	}
3039 
3040 	while (batchcount > 0) {
3041 		struct list_head *entry;
3042 		struct slab *slabp;
3043 		/* Get slab alloc is to come from. */
3044 		entry = l3->slabs_partial.next;
3045 		if (entry == &l3->slabs_partial) {
3046 			l3->free_touched = 1;
3047 			entry = l3->slabs_free.next;
3048 			if (entry == &l3->slabs_free)
3049 				goto must_grow;
3050 		}
3051 
3052 		slabp = list_entry(entry, struct slab, list);
3053 		check_slabp(cachep, slabp);
3054 		check_spinlock_acquired(cachep);
3055 
3056 		/*
3057 		 * The slab was either on partial or free list so
3058 		 * there must be at least one object available for
3059 		 * allocation.
3060 		 */
3061 		BUG_ON(slabp->inuse >= cachep->num);
3062 
3063 		while (slabp->inuse < cachep->num && batchcount--) {
3064 			STATS_INC_ALLOCED(cachep);
3065 			STATS_INC_ACTIVE(cachep);
3066 			STATS_SET_HIGH(cachep);
3067 
3068 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3069 							    node);
3070 		}
3071 		check_slabp(cachep, slabp);
3072 
3073 		/* move slabp to correct slabp list: */
3074 		list_del(&slabp->list);
3075 		if (slabp->free == BUFCTL_END)
3076 			list_add(&slabp->list, &l3->slabs_full);
3077 		else
3078 			list_add(&slabp->list, &l3->slabs_partial);
3079 	}
3080 
3081 must_grow:
3082 	l3->free_objects -= ac->avail;
3083 alloc_done:
3084 	spin_unlock(&l3->list_lock);
3085 
3086 	if (unlikely(!ac->avail)) {
3087 		int x;
3088 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3089 
3090 		/* cache_grow can reenable interrupts, then ac could change. */
3091 		ac = cpu_cache_get(cachep);
3092 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3093 			return NULL;
3094 
3095 		if (!ac->avail)		/* objects refilled by interrupt? */
3096 			goto retry;
3097 	}
3098 	ac->touched = 1;
3099 	return ac->entry[--ac->avail];
3100 }
3101 
3102 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3103 						gfp_t flags)
3104 {
3105 	might_sleep_if(flags & __GFP_WAIT);
3106 #if DEBUG
3107 	kmem_flagcheck(cachep, flags);
3108 #endif
3109 }
3110 
3111 #if DEBUG
3112 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3113 				gfp_t flags, void *objp, void *caller)
3114 {
3115 	if (!objp)
3116 		return objp;
3117 	if (cachep->flags & SLAB_POISON) {
3118 #ifdef CONFIG_DEBUG_PAGEALLOC
3119 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3120 			kernel_map_pages(virt_to_page(objp),
3121 					 cachep->buffer_size / PAGE_SIZE, 1);
3122 		else
3123 			check_poison_obj(cachep, objp);
3124 #else
3125 		check_poison_obj(cachep, objp);
3126 #endif
3127 		poison_obj(cachep, objp, POISON_INUSE);
3128 	}
3129 	if (cachep->flags & SLAB_STORE_USER)
3130 		*dbg_userword(cachep, objp) = caller;
3131 
3132 	if (cachep->flags & SLAB_RED_ZONE) {
3133 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3134 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3135 			slab_error(cachep, "double free, or memory outside"
3136 						" object was overwritten");
3137 			printk(KERN_ERR
3138 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3139 				objp, *dbg_redzone1(cachep, objp),
3140 				*dbg_redzone2(cachep, objp));
3141 		}
3142 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3143 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3144 	}
3145 #ifdef CONFIG_DEBUG_SLAB_LEAK
3146 	{
3147 		struct slab *slabp;
3148 		unsigned objnr;
3149 
3150 		slabp = page_get_slab(virt_to_head_page(objp));
3151 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3152 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3153 	}
3154 #endif
3155 	objp += obj_offset(cachep);
3156 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3157 		cachep->ctor(objp);
3158 	if (ARCH_SLAB_MINALIGN &&
3159 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3160 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3161 		       objp, (int)ARCH_SLAB_MINALIGN);
3162 	}
3163 	return objp;
3164 }
3165 #else
3166 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3167 #endif
3168 
3169 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3170 {
3171 	if (cachep == &cache_cache)
3172 		return false;
3173 
3174 	return should_failslab(obj_size(cachep), flags, cachep->flags);
3175 }
3176 
3177 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3178 {
3179 	void *objp;
3180 	struct array_cache *ac;
3181 
3182 	check_irq_off();
3183 
3184 	ac = cpu_cache_get(cachep);
3185 	if (likely(ac->avail)) {
3186 		STATS_INC_ALLOCHIT(cachep);
3187 		ac->touched = 1;
3188 		objp = ac->entry[--ac->avail];
3189 	} else {
3190 		STATS_INC_ALLOCMISS(cachep);
3191 		objp = cache_alloc_refill(cachep, flags);
3192 		/*
3193 		 * the 'ac' may be updated by cache_alloc_refill(),
3194 		 * and kmemleak_erase() requires its correct value.
3195 		 */
3196 		ac = cpu_cache_get(cachep);
3197 	}
3198 	/*
3199 	 * To avoid a false negative, if an object that is in one of the
3200 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3201 	 * treat the array pointers as a reference to the object.
3202 	 */
3203 	if (objp)
3204 		kmemleak_erase(&ac->entry[ac->avail]);
3205 	return objp;
3206 }
3207 
3208 #ifdef CONFIG_NUMA
3209 /*
3210  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3211  *
3212  * If we are in_interrupt, then process context, including cpusets and
3213  * mempolicy, may not apply and should not be used for allocation policy.
3214  */
3215 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3216 {
3217 	int nid_alloc, nid_here;
3218 
3219 	if (in_interrupt() || (flags & __GFP_THISNODE))
3220 		return NULL;
3221 	nid_alloc = nid_here = numa_mem_id();
3222 	get_mems_allowed();
3223 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3224 		nid_alloc = cpuset_slab_spread_node();
3225 	else if (current->mempolicy)
3226 		nid_alloc = slab_node(current->mempolicy);
3227 	put_mems_allowed();
3228 	if (nid_alloc != nid_here)
3229 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3230 	return NULL;
3231 }
3232 
3233 /*
3234  * Fallback function if there was no memory available and no objects on a
3235  * certain node and fall back is permitted. First we scan all the
3236  * available nodelists for available objects. If that fails then we
3237  * perform an allocation without specifying a node. This allows the page
3238  * allocator to do its reclaim / fallback magic. We then insert the
3239  * slab into the proper nodelist and then allocate from it.
3240  */
3241 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3242 {
3243 	struct zonelist *zonelist;
3244 	gfp_t local_flags;
3245 	struct zoneref *z;
3246 	struct zone *zone;
3247 	enum zone_type high_zoneidx = gfp_zone(flags);
3248 	void *obj = NULL;
3249 	int nid;
3250 
3251 	if (flags & __GFP_THISNODE)
3252 		return NULL;
3253 
3254 	get_mems_allowed();
3255 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
3256 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3257 
3258 retry:
3259 	/*
3260 	 * Look through allowed nodes for objects available
3261 	 * from existing per node queues.
3262 	 */
3263 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3264 		nid = zone_to_nid(zone);
3265 
3266 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3267 			cache->nodelists[nid] &&
3268 			cache->nodelists[nid]->free_objects) {
3269 				obj = ____cache_alloc_node(cache,
3270 					flags | GFP_THISNODE, nid);
3271 				if (obj)
3272 					break;
3273 		}
3274 	}
3275 
3276 	if (!obj) {
3277 		/*
3278 		 * This allocation will be performed within the constraints
3279 		 * of the current cpuset / memory policy requirements.
3280 		 * We may trigger various forms of reclaim on the allowed
3281 		 * set and go into memory reserves if necessary.
3282 		 */
3283 		if (local_flags & __GFP_WAIT)
3284 			local_irq_enable();
3285 		kmem_flagcheck(cache, flags);
3286 		obj = kmem_getpages(cache, local_flags, numa_mem_id());
3287 		if (local_flags & __GFP_WAIT)
3288 			local_irq_disable();
3289 		if (obj) {
3290 			/*
3291 			 * Insert into the appropriate per node queues
3292 			 */
3293 			nid = page_to_nid(virt_to_page(obj));
3294 			if (cache_grow(cache, flags, nid, obj)) {
3295 				obj = ____cache_alloc_node(cache,
3296 					flags | GFP_THISNODE, nid);
3297 				if (!obj)
3298 					/*
3299 					 * Another processor may allocate the
3300 					 * objects in the slab since we are
3301 					 * not holding any locks.
3302 					 */
3303 					goto retry;
3304 			} else {
3305 				/* cache_grow already freed obj */
3306 				obj = NULL;
3307 			}
3308 		}
3309 	}
3310 	put_mems_allowed();
3311 	return obj;
3312 }
3313 
3314 /*
3315  * A interface to enable slab creation on nodeid
3316  */
3317 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3318 				int nodeid)
3319 {
3320 	struct list_head *entry;
3321 	struct slab *slabp;
3322 	struct kmem_list3 *l3;
3323 	void *obj;
3324 	int x;
3325 
3326 	l3 = cachep->nodelists[nodeid];
3327 	BUG_ON(!l3);
3328 
3329 retry:
3330 	check_irq_off();
3331 	spin_lock(&l3->list_lock);
3332 	entry = l3->slabs_partial.next;
3333 	if (entry == &l3->slabs_partial) {
3334 		l3->free_touched = 1;
3335 		entry = l3->slabs_free.next;
3336 		if (entry == &l3->slabs_free)
3337 			goto must_grow;
3338 	}
3339 
3340 	slabp = list_entry(entry, struct slab, list);
3341 	check_spinlock_acquired_node(cachep, nodeid);
3342 	check_slabp(cachep, slabp);
3343 
3344 	STATS_INC_NODEALLOCS(cachep);
3345 	STATS_INC_ACTIVE(cachep);
3346 	STATS_SET_HIGH(cachep);
3347 
3348 	BUG_ON(slabp->inuse == cachep->num);
3349 
3350 	obj = slab_get_obj(cachep, slabp, nodeid);
3351 	check_slabp(cachep, slabp);
3352 	l3->free_objects--;
3353 	/* move slabp to correct slabp list: */
3354 	list_del(&slabp->list);
3355 
3356 	if (slabp->free == BUFCTL_END)
3357 		list_add(&slabp->list, &l3->slabs_full);
3358 	else
3359 		list_add(&slabp->list, &l3->slabs_partial);
3360 
3361 	spin_unlock(&l3->list_lock);
3362 	goto done;
3363 
3364 must_grow:
3365 	spin_unlock(&l3->list_lock);
3366 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3367 	if (x)
3368 		goto retry;
3369 
3370 	return fallback_alloc(cachep, flags);
3371 
3372 done:
3373 	return obj;
3374 }
3375 
3376 /**
3377  * kmem_cache_alloc_node - Allocate an object on the specified node
3378  * @cachep: The cache to allocate from.
3379  * @flags: See kmalloc().
3380  * @nodeid: node number of the target node.
3381  * @caller: return address of caller, used for debug information
3382  *
3383  * Identical to kmem_cache_alloc but it will allocate memory on the given
3384  * node, which can improve the performance for cpu bound structures.
3385  *
3386  * Fallback to other node is possible if __GFP_THISNODE is not set.
3387  */
3388 static __always_inline void *
3389 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3390 		   void *caller)
3391 {
3392 	unsigned long save_flags;
3393 	void *ptr;
3394 	int slab_node = numa_mem_id();
3395 
3396 	flags &= gfp_allowed_mask;
3397 
3398 	lockdep_trace_alloc(flags);
3399 
3400 	if (slab_should_failslab(cachep, flags))
3401 		return NULL;
3402 
3403 	cache_alloc_debugcheck_before(cachep, flags);
3404 	local_irq_save(save_flags);
3405 
3406 	if (nodeid == -1)
3407 		nodeid = slab_node;
3408 
3409 	if (unlikely(!cachep->nodelists[nodeid])) {
3410 		/* Node not bootstrapped yet */
3411 		ptr = fallback_alloc(cachep, flags);
3412 		goto out;
3413 	}
3414 
3415 	if (nodeid == slab_node) {
3416 		/*
3417 		 * Use the locally cached objects if possible.
3418 		 * However ____cache_alloc does not allow fallback
3419 		 * to other nodes. It may fail while we still have
3420 		 * objects on other nodes available.
3421 		 */
3422 		ptr = ____cache_alloc(cachep, flags);
3423 		if (ptr)
3424 			goto out;
3425 	}
3426 	/* ___cache_alloc_node can fall back to other nodes */
3427 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3428   out:
3429 	local_irq_restore(save_flags);
3430 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3431 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
3432 				 flags);
3433 
3434 	if (likely(ptr))
3435 		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
3436 
3437 	if (unlikely((flags & __GFP_ZERO) && ptr))
3438 		memset(ptr, 0, obj_size(cachep));
3439 
3440 	return ptr;
3441 }
3442 
3443 static __always_inline void *
3444 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3445 {
3446 	void *objp;
3447 
3448 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3449 		objp = alternate_node_alloc(cache, flags);
3450 		if (objp)
3451 			goto out;
3452 	}
3453 	objp = ____cache_alloc(cache, flags);
3454 
3455 	/*
3456 	 * We may just have run out of memory on the local node.
3457 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3458 	 */
3459 	if (!objp)
3460 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3461 
3462   out:
3463 	return objp;
3464 }
3465 #else
3466 
3467 static __always_inline void *
3468 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3469 {
3470 	return ____cache_alloc(cachep, flags);
3471 }
3472 
3473 #endif /* CONFIG_NUMA */
3474 
3475 static __always_inline void *
3476 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3477 {
3478 	unsigned long save_flags;
3479 	void *objp;
3480 
3481 	flags &= gfp_allowed_mask;
3482 
3483 	lockdep_trace_alloc(flags);
3484 
3485 	if (slab_should_failslab(cachep, flags))
3486 		return NULL;
3487 
3488 	cache_alloc_debugcheck_before(cachep, flags);
3489 	local_irq_save(save_flags);
3490 	objp = __do_cache_alloc(cachep, flags);
3491 	local_irq_restore(save_flags);
3492 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3493 	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
3494 				 flags);
3495 	prefetchw(objp);
3496 
3497 	if (likely(objp))
3498 		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
3499 
3500 	if (unlikely((flags & __GFP_ZERO) && objp))
3501 		memset(objp, 0, obj_size(cachep));
3502 
3503 	return objp;
3504 }
3505 
3506 /*
3507  * Caller needs to acquire correct kmem_list's list_lock
3508  */
3509 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3510 		       int node)
3511 {
3512 	int i;
3513 	struct kmem_list3 *l3;
3514 
3515 	for (i = 0; i < nr_objects; i++) {
3516 		void *objp = objpp[i];
3517 		struct slab *slabp;
3518 
3519 		slabp = virt_to_slab(objp);
3520 		l3 = cachep->nodelists[node];
3521 		list_del(&slabp->list);
3522 		check_spinlock_acquired_node(cachep, node);
3523 		check_slabp(cachep, slabp);
3524 		slab_put_obj(cachep, slabp, objp, node);
3525 		STATS_DEC_ACTIVE(cachep);
3526 		l3->free_objects++;
3527 		check_slabp(cachep, slabp);
3528 
3529 		/* fixup slab chains */
3530 		if (slabp->inuse == 0) {
3531 			if (l3->free_objects > l3->free_limit) {
3532 				l3->free_objects -= cachep->num;
3533 				/* No need to drop any previously held
3534 				 * lock here, even if we have a off-slab slab
3535 				 * descriptor it is guaranteed to come from
3536 				 * a different cache, refer to comments before
3537 				 * alloc_slabmgmt.
3538 				 */
3539 				slab_destroy(cachep, slabp);
3540 			} else {
3541 				list_add(&slabp->list, &l3->slabs_free);
3542 			}
3543 		} else {
3544 			/* Unconditionally move a slab to the end of the
3545 			 * partial list on free - maximum time for the
3546 			 * other objects to be freed, too.
3547 			 */
3548 			list_add_tail(&slabp->list, &l3->slabs_partial);
3549 		}
3550 	}
3551 }
3552 
3553 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3554 {
3555 	int batchcount;
3556 	struct kmem_list3 *l3;
3557 	int node = numa_mem_id();
3558 
3559 	batchcount = ac->batchcount;
3560 #if DEBUG
3561 	BUG_ON(!batchcount || batchcount > ac->avail);
3562 #endif
3563 	check_irq_off();
3564 	l3 = cachep->nodelists[node];
3565 	spin_lock(&l3->list_lock);
3566 	if (l3->shared) {
3567 		struct array_cache *shared_array = l3->shared;
3568 		int max = shared_array->limit - shared_array->avail;
3569 		if (max) {
3570 			if (batchcount > max)
3571 				batchcount = max;
3572 			memcpy(&(shared_array->entry[shared_array->avail]),
3573 			       ac->entry, sizeof(void *) * batchcount);
3574 			shared_array->avail += batchcount;
3575 			goto free_done;
3576 		}
3577 	}
3578 
3579 	free_block(cachep, ac->entry, batchcount, node);
3580 free_done:
3581 #if STATS
3582 	{
3583 		int i = 0;
3584 		struct list_head *p;
3585 
3586 		p = l3->slabs_free.next;
3587 		while (p != &(l3->slabs_free)) {
3588 			struct slab *slabp;
3589 
3590 			slabp = list_entry(p, struct slab, list);
3591 			BUG_ON(slabp->inuse);
3592 
3593 			i++;
3594 			p = p->next;
3595 		}
3596 		STATS_SET_FREEABLE(cachep, i);
3597 	}
3598 #endif
3599 	spin_unlock(&l3->list_lock);
3600 	ac->avail -= batchcount;
3601 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3602 }
3603 
3604 /*
3605  * Release an obj back to its cache. If the obj has a constructed state, it must
3606  * be in this state _before_ it is released.  Called with disabled ints.
3607  */
3608 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3609     void *caller)
3610 {
3611 	struct array_cache *ac = cpu_cache_get(cachep);
3612 
3613 	check_irq_off();
3614 	kmemleak_free_recursive(objp, cachep->flags);
3615 	objp = cache_free_debugcheck(cachep, objp, caller);
3616 
3617 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3618 
3619 	/*
3620 	 * Skip calling cache_free_alien() when the platform is not numa.
3621 	 * This will avoid cache misses that happen while accessing slabp (which
3622 	 * is per page memory  reference) to get nodeid. Instead use a global
3623 	 * variable to skip the call, which is mostly likely to be present in
3624 	 * the cache.
3625 	 */
3626 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3627 		return;
3628 
3629 	if (likely(ac->avail < ac->limit)) {
3630 		STATS_INC_FREEHIT(cachep);
3631 		ac->entry[ac->avail++] = objp;
3632 		return;
3633 	} else {
3634 		STATS_INC_FREEMISS(cachep);
3635 		cache_flusharray(cachep, ac);
3636 		ac->entry[ac->avail++] = objp;
3637 	}
3638 }
3639 
3640 /**
3641  * kmem_cache_alloc - Allocate an object
3642  * @cachep: The cache to allocate from.
3643  * @flags: See kmalloc().
3644  *
3645  * Allocate an object from this cache.  The flags are only relevant
3646  * if the cache has no available objects.
3647  */
3648 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3649 {
3650 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3651 
3652 	trace_kmem_cache_alloc(_RET_IP_, ret,
3653 			       obj_size(cachep), cachep->buffer_size, flags);
3654 
3655 	return ret;
3656 }
3657 EXPORT_SYMBOL(kmem_cache_alloc);
3658 
3659 #ifdef CONFIG_TRACING
3660 void *
3661 kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
3662 {
3663 	void *ret;
3664 
3665 	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3666 
3667 	trace_kmalloc(_RET_IP_, ret,
3668 		      size, slab_buffer_size(cachep), flags);
3669 	return ret;
3670 }
3671 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3672 #endif
3673 
3674 #ifdef CONFIG_NUMA
3675 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3676 {
3677 	void *ret = __cache_alloc_node(cachep, flags, nodeid,
3678 				       __builtin_return_address(0));
3679 
3680 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3681 				    obj_size(cachep), cachep->buffer_size,
3682 				    flags, nodeid);
3683 
3684 	return ret;
3685 }
3686 EXPORT_SYMBOL(kmem_cache_alloc_node);
3687 
3688 #ifdef CONFIG_TRACING
3689 void *kmem_cache_alloc_node_trace(size_t size,
3690 				  struct kmem_cache *cachep,
3691 				  gfp_t flags,
3692 				  int nodeid)
3693 {
3694 	void *ret;
3695 
3696 	ret = __cache_alloc_node(cachep, flags, nodeid,
3697 				  __builtin_return_address(0));
3698 	trace_kmalloc_node(_RET_IP_, ret,
3699 			   size, slab_buffer_size(cachep),
3700 			   flags, nodeid);
3701 	return ret;
3702 }
3703 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3704 #endif
3705 
3706 static __always_inline void *
3707 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3708 {
3709 	struct kmem_cache *cachep;
3710 
3711 	cachep = kmem_find_general_cachep(size, flags);
3712 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3713 		return cachep;
3714 	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
3715 }
3716 
3717 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3718 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3719 {
3720 	return __do_kmalloc_node(size, flags, node,
3721 			__builtin_return_address(0));
3722 }
3723 EXPORT_SYMBOL(__kmalloc_node);
3724 
3725 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3726 		int node, unsigned long caller)
3727 {
3728 	return __do_kmalloc_node(size, flags, node, (void *)caller);
3729 }
3730 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3731 #else
3732 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3733 {
3734 	return __do_kmalloc_node(size, flags, node, NULL);
3735 }
3736 EXPORT_SYMBOL(__kmalloc_node);
3737 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3738 #endif /* CONFIG_NUMA */
3739 
3740 /**
3741  * __do_kmalloc - allocate memory
3742  * @size: how many bytes of memory are required.
3743  * @flags: the type of memory to allocate (see kmalloc).
3744  * @caller: function caller for debug tracking of the caller
3745  */
3746 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3747 					  void *caller)
3748 {
3749 	struct kmem_cache *cachep;
3750 	void *ret;
3751 
3752 	/* If you want to save a few bytes .text space: replace
3753 	 * __ with kmem_.
3754 	 * Then kmalloc uses the uninlined functions instead of the inline
3755 	 * functions.
3756 	 */
3757 	cachep = __find_general_cachep(size, flags);
3758 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3759 		return cachep;
3760 	ret = __cache_alloc(cachep, flags, caller);
3761 
3762 	trace_kmalloc((unsigned long) caller, ret,
3763 		      size, cachep->buffer_size, flags);
3764 
3765 	return ret;
3766 }
3767 
3768 
3769 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3770 void *__kmalloc(size_t size, gfp_t flags)
3771 {
3772 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3773 }
3774 EXPORT_SYMBOL(__kmalloc);
3775 
3776 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3777 {
3778 	return __do_kmalloc(size, flags, (void *)caller);
3779 }
3780 EXPORT_SYMBOL(__kmalloc_track_caller);
3781 
3782 #else
3783 void *__kmalloc(size_t size, gfp_t flags)
3784 {
3785 	return __do_kmalloc(size, flags, NULL);
3786 }
3787 EXPORT_SYMBOL(__kmalloc);
3788 #endif
3789 
3790 /**
3791  * kmem_cache_free - Deallocate an object
3792  * @cachep: The cache the allocation was from.
3793  * @objp: The previously allocated object.
3794  *
3795  * Free an object which was previously allocated from this
3796  * cache.
3797  */
3798 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3799 {
3800 	unsigned long flags;
3801 
3802 	local_irq_save(flags);
3803 	debug_check_no_locks_freed(objp, obj_size(cachep));
3804 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3805 		debug_check_no_obj_freed(objp, obj_size(cachep));
3806 	__cache_free(cachep, objp, __builtin_return_address(0));
3807 	local_irq_restore(flags);
3808 
3809 	trace_kmem_cache_free(_RET_IP_, objp);
3810 }
3811 EXPORT_SYMBOL(kmem_cache_free);
3812 
3813 /**
3814  * kfree - free previously allocated memory
3815  * @objp: pointer returned by kmalloc.
3816  *
3817  * If @objp is NULL, no operation is performed.
3818  *
3819  * Don't free memory not originally allocated by kmalloc()
3820  * or you will run into trouble.
3821  */
3822 void kfree(const void *objp)
3823 {
3824 	struct kmem_cache *c;
3825 	unsigned long flags;
3826 
3827 	trace_kfree(_RET_IP_, objp);
3828 
3829 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3830 		return;
3831 	local_irq_save(flags);
3832 	kfree_debugcheck(objp);
3833 	c = virt_to_cache(objp);
3834 	debug_check_no_locks_freed(objp, obj_size(c));
3835 	debug_check_no_obj_freed(objp, obj_size(c));
3836 	__cache_free(c, (void *)objp, __builtin_return_address(0));
3837 	local_irq_restore(flags);
3838 }
3839 EXPORT_SYMBOL(kfree);
3840 
3841 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3842 {
3843 	return obj_size(cachep);
3844 }
3845 EXPORT_SYMBOL(kmem_cache_size);
3846 
3847 /*
3848  * This initializes kmem_list3 or resizes various caches for all nodes.
3849  */
3850 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3851 {
3852 	int node;
3853 	struct kmem_list3 *l3;
3854 	struct array_cache *new_shared;
3855 	struct array_cache **new_alien = NULL;
3856 
3857 	for_each_online_node(node) {
3858 
3859                 if (use_alien_caches) {
3860                         new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3861                         if (!new_alien)
3862                                 goto fail;
3863                 }
3864 
3865 		new_shared = NULL;
3866 		if (cachep->shared) {
3867 			new_shared = alloc_arraycache(node,
3868 				cachep->shared*cachep->batchcount,
3869 					0xbaadf00d, gfp);
3870 			if (!new_shared) {
3871 				free_alien_cache(new_alien);
3872 				goto fail;
3873 			}
3874 		}
3875 
3876 		l3 = cachep->nodelists[node];
3877 		if (l3) {
3878 			struct array_cache *shared = l3->shared;
3879 
3880 			spin_lock_irq(&l3->list_lock);
3881 
3882 			if (shared)
3883 				free_block(cachep, shared->entry,
3884 						shared->avail, node);
3885 
3886 			l3->shared = new_shared;
3887 			if (!l3->alien) {
3888 				l3->alien = new_alien;
3889 				new_alien = NULL;
3890 			}
3891 			l3->free_limit = (1 + nr_cpus_node(node)) *
3892 					cachep->batchcount + cachep->num;
3893 			spin_unlock_irq(&l3->list_lock);
3894 			kfree(shared);
3895 			free_alien_cache(new_alien);
3896 			continue;
3897 		}
3898 		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
3899 		if (!l3) {
3900 			free_alien_cache(new_alien);
3901 			kfree(new_shared);
3902 			goto fail;
3903 		}
3904 
3905 		kmem_list3_init(l3);
3906 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3907 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3908 		l3->shared = new_shared;
3909 		l3->alien = new_alien;
3910 		l3->free_limit = (1 + nr_cpus_node(node)) *
3911 					cachep->batchcount + cachep->num;
3912 		cachep->nodelists[node] = l3;
3913 	}
3914 	return 0;
3915 
3916 fail:
3917 	if (!cachep->next.next) {
3918 		/* Cache is not active yet. Roll back what we did */
3919 		node--;
3920 		while (node >= 0) {
3921 			if (cachep->nodelists[node]) {
3922 				l3 = cachep->nodelists[node];
3923 
3924 				kfree(l3->shared);
3925 				free_alien_cache(l3->alien);
3926 				kfree(l3);
3927 				cachep->nodelists[node] = NULL;
3928 			}
3929 			node--;
3930 		}
3931 	}
3932 	return -ENOMEM;
3933 }
3934 
3935 struct ccupdate_struct {
3936 	struct kmem_cache *cachep;
3937 	struct array_cache *new[NR_CPUS];
3938 };
3939 
3940 static void do_ccupdate_local(void *info)
3941 {
3942 	struct ccupdate_struct *new = info;
3943 	struct array_cache *old;
3944 
3945 	check_irq_off();
3946 	old = cpu_cache_get(new->cachep);
3947 
3948 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3949 	new->new[smp_processor_id()] = old;
3950 }
3951 
3952 /* Always called with the cache_chain_mutex held */
3953 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3954 				int batchcount, int shared, gfp_t gfp)
3955 {
3956 	struct ccupdate_struct *new;
3957 	int i;
3958 
3959 	new = kzalloc(sizeof(*new), gfp);
3960 	if (!new)
3961 		return -ENOMEM;
3962 
3963 	for_each_online_cpu(i) {
3964 		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
3965 						batchcount, gfp);
3966 		if (!new->new[i]) {
3967 			for (i--; i >= 0; i--)
3968 				kfree(new->new[i]);
3969 			kfree(new);
3970 			return -ENOMEM;
3971 		}
3972 	}
3973 	new->cachep = cachep;
3974 
3975 	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3976 
3977 	check_irq_on();
3978 	cachep->batchcount = batchcount;
3979 	cachep->limit = limit;
3980 	cachep->shared = shared;
3981 
3982 	for_each_online_cpu(i) {
3983 		struct array_cache *ccold = new->new[i];
3984 		if (!ccold)
3985 			continue;
3986 		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
3987 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
3988 		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
3989 		kfree(ccold);
3990 	}
3991 	kfree(new);
3992 	return alloc_kmemlist(cachep, gfp);
3993 }
3994 
3995 /* Called with cache_chain_mutex held always */
3996 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3997 {
3998 	int err;
3999 	int limit, shared;
4000 
4001 	/*
4002 	 * The head array serves three purposes:
4003 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4004 	 * - reduce the number of spinlock operations.
4005 	 * - reduce the number of linked list operations on the slab and
4006 	 *   bufctl chains: array operations are cheaper.
4007 	 * The numbers are guessed, we should auto-tune as described by
4008 	 * Bonwick.
4009 	 */
4010 	if (cachep->buffer_size > 131072)
4011 		limit = 1;
4012 	else if (cachep->buffer_size > PAGE_SIZE)
4013 		limit = 8;
4014 	else if (cachep->buffer_size > 1024)
4015 		limit = 24;
4016 	else if (cachep->buffer_size > 256)
4017 		limit = 54;
4018 	else
4019 		limit = 120;
4020 
4021 	/*
4022 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
4023 	 * allocation behaviour: Most allocs on one cpu, most free operations
4024 	 * on another cpu. For these cases, an efficient object passing between
4025 	 * cpus is necessary. This is provided by a shared array. The array
4026 	 * replaces Bonwick's magazine layer.
4027 	 * On uniprocessor, it's functionally equivalent (but less efficient)
4028 	 * to a larger limit. Thus disabled by default.
4029 	 */
4030 	shared = 0;
4031 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4032 		shared = 8;
4033 
4034 #if DEBUG
4035 	/*
4036 	 * With debugging enabled, large batchcount lead to excessively long
4037 	 * periods with disabled local interrupts. Limit the batchcount
4038 	 */
4039 	if (limit > 32)
4040 		limit = 32;
4041 #endif
4042 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
4043 	if (err)
4044 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4045 		       cachep->name, -err);
4046 	return err;
4047 }
4048 
4049 /*
4050  * Drain an array if it contains any elements taking the l3 lock only if
4051  * necessary. Note that the l3 listlock also protects the array_cache
4052  * if drain_array() is used on the shared array.
4053  */
4054 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4055 			 struct array_cache *ac, int force, int node)
4056 {
4057 	int tofree;
4058 
4059 	if (!ac || !ac->avail)
4060 		return;
4061 	if (ac->touched && !force) {
4062 		ac->touched = 0;
4063 	} else {
4064 		spin_lock_irq(&l3->list_lock);
4065 		if (ac->avail) {
4066 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4067 			if (tofree > ac->avail)
4068 				tofree = (ac->avail + 1) / 2;
4069 			free_block(cachep, ac->entry, tofree, node);
4070 			ac->avail -= tofree;
4071 			memmove(ac->entry, &(ac->entry[tofree]),
4072 				sizeof(void *) * ac->avail);
4073 		}
4074 		spin_unlock_irq(&l3->list_lock);
4075 	}
4076 }
4077 
4078 /**
4079  * cache_reap - Reclaim memory from caches.
4080  * @w: work descriptor
4081  *
4082  * Called from workqueue/eventd every few seconds.
4083  * Purpose:
4084  * - clear the per-cpu caches for this CPU.
4085  * - return freeable pages to the main free memory pool.
4086  *
4087  * If we cannot acquire the cache chain mutex then just give up - we'll try
4088  * again on the next iteration.
4089  */
4090 static void cache_reap(struct work_struct *w)
4091 {
4092 	struct kmem_cache *searchp;
4093 	struct kmem_list3 *l3;
4094 	int node = numa_mem_id();
4095 	struct delayed_work *work = to_delayed_work(w);
4096 
4097 	if (!mutex_trylock(&cache_chain_mutex))
4098 		/* Give up. Setup the next iteration. */
4099 		goto out;
4100 
4101 	list_for_each_entry(searchp, &cache_chain, next) {
4102 		check_irq_on();
4103 
4104 		/*
4105 		 * We only take the l3 lock if absolutely necessary and we
4106 		 * have established with reasonable certainty that
4107 		 * we can do some work if the lock was obtained.
4108 		 */
4109 		l3 = searchp->nodelists[node];
4110 
4111 		reap_alien(searchp, l3);
4112 
4113 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4114 
4115 		/*
4116 		 * These are racy checks but it does not matter
4117 		 * if we skip one check or scan twice.
4118 		 */
4119 		if (time_after(l3->next_reap, jiffies))
4120 			goto next;
4121 
4122 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4123 
4124 		drain_array(searchp, l3, l3->shared, 0, node);
4125 
4126 		if (l3->free_touched)
4127 			l3->free_touched = 0;
4128 		else {
4129 			int freed;
4130 
4131 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4132 				5 * searchp->num - 1) / (5 * searchp->num));
4133 			STATS_ADD_REAPED(searchp, freed);
4134 		}
4135 next:
4136 		cond_resched();
4137 	}
4138 	check_irq_on();
4139 	mutex_unlock(&cache_chain_mutex);
4140 	next_reap_node();
4141 out:
4142 	/* Set up the next iteration */
4143 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4144 }
4145 
4146 #ifdef CONFIG_SLABINFO
4147 
4148 static void print_slabinfo_header(struct seq_file *m)
4149 {
4150 	/*
4151 	 * Output format version, so at least we can change it
4152 	 * without _too_ many complaints.
4153 	 */
4154 #if STATS
4155 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4156 #else
4157 	seq_puts(m, "slabinfo - version: 2.1\n");
4158 #endif
4159 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4160 		 "<objperslab> <pagesperslab>");
4161 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4162 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4163 #if STATS
4164 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4165 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4166 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4167 #endif
4168 	seq_putc(m, '\n');
4169 }
4170 
4171 static void *s_start(struct seq_file *m, loff_t *pos)
4172 {
4173 	loff_t n = *pos;
4174 
4175 	mutex_lock(&cache_chain_mutex);
4176 	if (!n)
4177 		print_slabinfo_header(m);
4178 
4179 	return seq_list_start(&cache_chain, *pos);
4180 }
4181 
4182 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4183 {
4184 	return seq_list_next(p, &cache_chain, pos);
4185 }
4186 
4187 static void s_stop(struct seq_file *m, void *p)
4188 {
4189 	mutex_unlock(&cache_chain_mutex);
4190 }
4191 
4192 static int s_show(struct seq_file *m, void *p)
4193 {
4194 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4195 	struct slab *slabp;
4196 	unsigned long active_objs;
4197 	unsigned long num_objs;
4198 	unsigned long active_slabs = 0;
4199 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4200 	const char *name;
4201 	char *error = NULL;
4202 	int node;
4203 	struct kmem_list3 *l3;
4204 
4205 	active_objs = 0;
4206 	num_slabs = 0;
4207 	for_each_online_node(node) {
4208 		l3 = cachep->nodelists[node];
4209 		if (!l3)
4210 			continue;
4211 
4212 		check_irq_on();
4213 		spin_lock_irq(&l3->list_lock);
4214 
4215 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4216 			if (slabp->inuse != cachep->num && !error)
4217 				error = "slabs_full accounting error";
4218 			active_objs += cachep->num;
4219 			active_slabs++;
4220 		}
4221 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4222 			if (slabp->inuse == cachep->num && !error)
4223 				error = "slabs_partial inuse accounting error";
4224 			if (!slabp->inuse && !error)
4225 				error = "slabs_partial/inuse accounting error";
4226 			active_objs += slabp->inuse;
4227 			active_slabs++;
4228 		}
4229 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4230 			if (slabp->inuse && !error)
4231 				error = "slabs_free/inuse accounting error";
4232 			num_slabs++;
4233 		}
4234 		free_objects += l3->free_objects;
4235 		if (l3->shared)
4236 			shared_avail += l3->shared->avail;
4237 
4238 		spin_unlock_irq(&l3->list_lock);
4239 	}
4240 	num_slabs += active_slabs;
4241 	num_objs = num_slabs * cachep->num;
4242 	if (num_objs - active_objs != free_objects && !error)
4243 		error = "free_objects accounting error";
4244 
4245 	name = cachep->name;
4246 	if (error)
4247 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4248 
4249 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4250 		   name, active_objs, num_objs, cachep->buffer_size,
4251 		   cachep->num, (1 << cachep->gfporder));
4252 	seq_printf(m, " : tunables %4u %4u %4u",
4253 		   cachep->limit, cachep->batchcount, cachep->shared);
4254 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4255 		   active_slabs, num_slabs, shared_avail);
4256 #if STATS
4257 	{			/* list3 stats */
4258 		unsigned long high = cachep->high_mark;
4259 		unsigned long allocs = cachep->num_allocations;
4260 		unsigned long grown = cachep->grown;
4261 		unsigned long reaped = cachep->reaped;
4262 		unsigned long errors = cachep->errors;
4263 		unsigned long max_freeable = cachep->max_freeable;
4264 		unsigned long node_allocs = cachep->node_allocs;
4265 		unsigned long node_frees = cachep->node_frees;
4266 		unsigned long overflows = cachep->node_overflow;
4267 
4268 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4269 			   "%4lu %4lu %4lu %4lu %4lu",
4270 			   allocs, high, grown,
4271 			   reaped, errors, max_freeable, node_allocs,
4272 			   node_frees, overflows);
4273 	}
4274 	/* cpu stats */
4275 	{
4276 		unsigned long allochit = atomic_read(&cachep->allochit);
4277 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4278 		unsigned long freehit = atomic_read(&cachep->freehit);
4279 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4280 
4281 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4282 			   allochit, allocmiss, freehit, freemiss);
4283 	}
4284 #endif
4285 	seq_putc(m, '\n');
4286 	return 0;
4287 }
4288 
4289 /*
4290  * slabinfo_op - iterator that generates /proc/slabinfo
4291  *
4292  * Output layout:
4293  * cache-name
4294  * num-active-objs
4295  * total-objs
4296  * object size
4297  * num-active-slabs
4298  * total-slabs
4299  * num-pages-per-slab
4300  * + further values on SMP and with statistics enabled
4301  */
4302 
4303 static const struct seq_operations slabinfo_op = {
4304 	.start = s_start,
4305 	.next = s_next,
4306 	.stop = s_stop,
4307 	.show = s_show,
4308 };
4309 
4310 #define MAX_SLABINFO_WRITE 128
4311 /**
4312  * slabinfo_write - Tuning for the slab allocator
4313  * @file: unused
4314  * @buffer: user buffer
4315  * @count: data length
4316  * @ppos: unused
4317  */
4318 static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4319 		       size_t count, loff_t *ppos)
4320 {
4321 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4322 	int limit, batchcount, shared, res;
4323 	struct kmem_cache *cachep;
4324 
4325 	if (count > MAX_SLABINFO_WRITE)
4326 		return -EINVAL;
4327 	if (copy_from_user(&kbuf, buffer, count))
4328 		return -EFAULT;
4329 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4330 
4331 	tmp = strchr(kbuf, ' ');
4332 	if (!tmp)
4333 		return -EINVAL;
4334 	*tmp = '\0';
4335 	tmp++;
4336 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4337 		return -EINVAL;
4338 
4339 	/* Find the cache in the chain of caches. */
4340 	mutex_lock(&cache_chain_mutex);
4341 	res = -EINVAL;
4342 	list_for_each_entry(cachep, &cache_chain, next) {
4343 		if (!strcmp(cachep->name, kbuf)) {
4344 			if (limit < 1 || batchcount < 1 ||
4345 					batchcount > limit || shared < 0) {
4346 				res = 0;
4347 			} else {
4348 				res = do_tune_cpucache(cachep, limit,
4349 						       batchcount, shared,
4350 						       GFP_KERNEL);
4351 			}
4352 			break;
4353 		}
4354 	}
4355 	mutex_unlock(&cache_chain_mutex);
4356 	if (res >= 0)
4357 		res = count;
4358 	return res;
4359 }
4360 
4361 static int slabinfo_open(struct inode *inode, struct file *file)
4362 {
4363 	return seq_open(file, &slabinfo_op);
4364 }
4365 
4366 static const struct file_operations proc_slabinfo_operations = {
4367 	.open		= slabinfo_open,
4368 	.read		= seq_read,
4369 	.write		= slabinfo_write,
4370 	.llseek		= seq_lseek,
4371 	.release	= seq_release,
4372 };
4373 
4374 #ifdef CONFIG_DEBUG_SLAB_LEAK
4375 
4376 static void *leaks_start(struct seq_file *m, loff_t *pos)
4377 {
4378 	mutex_lock(&cache_chain_mutex);
4379 	return seq_list_start(&cache_chain, *pos);
4380 }
4381 
4382 static inline int add_caller(unsigned long *n, unsigned long v)
4383 {
4384 	unsigned long *p;
4385 	int l;
4386 	if (!v)
4387 		return 1;
4388 	l = n[1];
4389 	p = n + 2;
4390 	while (l) {
4391 		int i = l/2;
4392 		unsigned long *q = p + 2 * i;
4393 		if (*q == v) {
4394 			q[1]++;
4395 			return 1;
4396 		}
4397 		if (*q > v) {
4398 			l = i;
4399 		} else {
4400 			p = q + 2;
4401 			l -= i + 1;
4402 		}
4403 	}
4404 	if (++n[1] == n[0])
4405 		return 0;
4406 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4407 	p[0] = v;
4408 	p[1] = 1;
4409 	return 1;
4410 }
4411 
4412 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4413 {
4414 	void *p;
4415 	int i;
4416 	if (n[0] == n[1])
4417 		return;
4418 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4419 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4420 			continue;
4421 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4422 			return;
4423 	}
4424 }
4425 
4426 static void show_symbol(struct seq_file *m, unsigned long address)
4427 {
4428 #ifdef CONFIG_KALLSYMS
4429 	unsigned long offset, size;
4430 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4431 
4432 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4433 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4434 		if (modname[0])
4435 			seq_printf(m, " [%s]", modname);
4436 		return;
4437 	}
4438 #endif
4439 	seq_printf(m, "%p", (void *)address);
4440 }
4441 
4442 static int leaks_show(struct seq_file *m, void *p)
4443 {
4444 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4445 	struct slab *slabp;
4446 	struct kmem_list3 *l3;
4447 	const char *name;
4448 	unsigned long *n = m->private;
4449 	int node;
4450 	int i;
4451 
4452 	if (!(cachep->flags & SLAB_STORE_USER))
4453 		return 0;
4454 	if (!(cachep->flags & SLAB_RED_ZONE))
4455 		return 0;
4456 
4457 	/* OK, we can do it */
4458 
4459 	n[1] = 0;
4460 
4461 	for_each_online_node(node) {
4462 		l3 = cachep->nodelists[node];
4463 		if (!l3)
4464 			continue;
4465 
4466 		check_irq_on();
4467 		spin_lock_irq(&l3->list_lock);
4468 
4469 		list_for_each_entry(slabp, &l3->slabs_full, list)
4470 			handle_slab(n, cachep, slabp);
4471 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4472 			handle_slab(n, cachep, slabp);
4473 		spin_unlock_irq(&l3->list_lock);
4474 	}
4475 	name = cachep->name;
4476 	if (n[0] == n[1]) {
4477 		/* Increase the buffer size */
4478 		mutex_unlock(&cache_chain_mutex);
4479 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4480 		if (!m->private) {
4481 			/* Too bad, we are really out */
4482 			m->private = n;
4483 			mutex_lock(&cache_chain_mutex);
4484 			return -ENOMEM;
4485 		}
4486 		*(unsigned long *)m->private = n[0] * 2;
4487 		kfree(n);
4488 		mutex_lock(&cache_chain_mutex);
4489 		/* Now make sure this entry will be retried */
4490 		m->count = m->size;
4491 		return 0;
4492 	}
4493 	for (i = 0; i < n[1]; i++) {
4494 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4495 		show_symbol(m, n[2*i+2]);
4496 		seq_putc(m, '\n');
4497 	}
4498 
4499 	return 0;
4500 }
4501 
4502 static const struct seq_operations slabstats_op = {
4503 	.start = leaks_start,
4504 	.next = s_next,
4505 	.stop = s_stop,
4506 	.show = leaks_show,
4507 };
4508 
4509 static int slabstats_open(struct inode *inode, struct file *file)
4510 {
4511 	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4512 	int ret = -ENOMEM;
4513 	if (n) {
4514 		ret = seq_open(file, &slabstats_op);
4515 		if (!ret) {
4516 			struct seq_file *m = file->private_data;
4517 			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4518 			m->private = n;
4519 			n = NULL;
4520 		}
4521 		kfree(n);
4522 	}
4523 	return ret;
4524 }
4525 
4526 static const struct file_operations proc_slabstats_operations = {
4527 	.open		= slabstats_open,
4528 	.read		= seq_read,
4529 	.llseek		= seq_lseek,
4530 	.release	= seq_release_private,
4531 };
4532 #endif
4533 
4534 static int __init slab_proc_init(void)
4535 {
4536 	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
4537 #ifdef CONFIG_DEBUG_SLAB_LEAK
4538 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4539 #endif
4540 	return 0;
4541 }
4542 module_init(slab_proc_init);
4543 #endif
4544 
4545 /**
4546  * ksize - get the actual amount of memory allocated for a given object
4547  * @objp: Pointer to the object
4548  *
4549  * kmalloc may internally round up allocations and return more memory
4550  * than requested. ksize() can be used to determine the actual amount of
4551  * memory allocated. The caller may use this additional memory, even though
4552  * a smaller amount of memory was initially specified with the kmalloc call.
4553  * The caller must guarantee that objp points to a valid object previously
4554  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4555  * must not be freed during the duration of the call.
4556  */
4557 size_t ksize(const void *objp)
4558 {
4559 	BUG_ON(!objp);
4560 	if (unlikely(objp == ZERO_SIZE_PTR))
4561 		return 0;
4562 
4563 	return obj_size(virt_to_cache(objp));
4564 }
4565 EXPORT_SYMBOL(ksize);
4566