xref: /openbmc/linux/mm/slab.c (revision d670b479)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists out of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means, that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise from empty slabs or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with enabled local interrupts -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<asm/cacheflush.h>
121 #include	<asm/tlbflush.h>
122 #include	<asm/page.h>
123 
124 #include <trace/events/kmem.h>
125 
126 /*
127  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
128  *		  0 for faster, smaller code (especially in the critical paths).
129  *
130  * STATS	- 1 to collect stats for /proc/slabinfo.
131  *		  0 for faster, smaller code (especially in the critical paths).
132  *
133  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
134  */
135 
136 #ifdef CONFIG_DEBUG_SLAB
137 #define	DEBUG		1
138 #define	STATS		1
139 #define	FORCED_DEBUG	1
140 #else
141 #define	DEBUG		0
142 #define	STATS		0
143 #define	FORCED_DEBUG	0
144 #endif
145 
146 /* Shouldn't this be in a header file somewhere? */
147 #define	BYTES_PER_WORD		sizeof(void *)
148 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
149 
150 #ifndef ARCH_KMALLOC_FLAGS
151 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
152 #endif
153 
154 /* Legal flag mask for kmem_cache_create(). */
155 #if DEBUG
156 # define CREATE_MASK	(SLAB_RED_ZONE | \
157 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
158 			 SLAB_CACHE_DMA | \
159 			 SLAB_STORE_USER | \
160 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
161 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
162 			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
163 #else
164 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
165 			 SLAB_CACHE_DMA | \
166 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
167 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
168 			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
169 #endif
170 
171 /*
172  * kmem_bufctl_t:
173  *
174  * Bufctl's are used for linking objs within a slab
175  * linked offsets.
176  *
177  * This implementation relies on "struct page" for locating the cache &
178  * slab an object belongs to.
179  * This allows the bufctl structure to be small (one int), but limits
180  * the number of objects a slab (not a cache) can contain when off-slab
181  * bufctls are used. The limit is the size of the largest general cache
182  * that does not use off-slab slabs.
183  * For 32bit archs with 4 kB pages, is this 56.
184  * This is not serious, as it is only for large objects, when it is unwise
185  * to have too many per slab.
186  * Note: This limit can be raised by introducing a general cache whose size
187  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
188  */
189 
190 typedef unsigned int kmem_bufctl_t;
191 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
192 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
193 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
194 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
195 
196 /*
197  * struct slab_rcu
198  *
199  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
200  * arrange for kmem_freepages to be called via RCU.  This is useful if
201  * we need to approach a kernel structure obliquely, from its address
202  * obtained without the usual locking.  We can lock the structure to
203  * stabilize it and check it's still at the given address, only if we
204  * can be sure that the memory has not been meanwhile reused for some
205  * other kind of object (which our subsystem's lock might corrupt).
206  *
207  * rcu_read_lock before reading the address, then rcu_read_unlock after
208  * taking the spinlock within the structure expected at that address.
209  */
210 struct slab_rcu {
211 	struct rcu_head head;
212 	struct kmem_cache *cachep;
213 	void *addr;
214 };
215 
216 /*
217  * struct slab
218  *
219  * Manages the objs in a slab. Placed either at the beginning of mem allocated
220  * for a slab, or allocated from an general cache.
221  * Slabs are chained into three list: fully used, partial, fully free slabs.
222  */
223 struct slab {
224 	union {
225 		struct {
226 			struct list_head list;
227 			unsigned long colouroff;
228 			void *s_mem;		/* including colour offset */
229 			unsigned int inuse;	/* num of objs active in slab */
230 			kmem_bufctl_t free;
231 			unsigned short nodeid;
232 		};
233 		struct slab_rcu __slab_cover_slab_rcu;
234 	};
235 };
236 
237 /*
238  * struct array_cache
239  *
240  * Purpose:
241  * - LIFO ordering, to hand out cache-warm objects from _alloc
242  * - reduce the number of linked list operations
243  * - reduce spinlock operations
244  *
245  * The limit is stored in the per-cpu structure to reduce the data cache
246  * footprint.
247  *
248  */
249 struct array_cache {
250 	unsigned int avail;
251 	unsigned int limit;
252 	unsigned int batchcount;
253 	unsigned int touched;
254 	spinlock_t lock;
255 	void *entry[];	/*
256 			 * Must have this definition in here for the proper
257 			 * alignment of array_cache. Also simplifies accessing
258 			 * the entries.
259 			 */
260 };
261 
262 /*
263  * bootstrap: The caches do not work without cpuarrays anymore, but the
264  * cpuarrays are allocated from the generic caches...
265  */
266 #define BOOT_CPUCACHE_ENTRIES	1
267 struct arraycache_init {
268 	struct array_cache cache;
269 	void *entries[BOOT_CPUCACHE_ENTRIES];
270 };
271 
272 /*
273  * The slab lists for all objects.
274  */
275 struct kmem_list3 {
276 	struct list_head slabs_partial;	/* partial list first, better asm code */
277 	struct list_head slabs_full;
278 	struct list_head slabs_free;
279 	unsigned long free_objects;
280 	unsigned int free_limit;
281 	unsigned int colour_next;	/* Per-node cache coloring */
282 	spinlock_t list_lock;
283 	struct array_cache *shared;	/* shared per node */
284 	struct array_cache **alien;	/* on other nodes */
285 	unsigned long next_reap;	/* updated without locking */
286 	int free_touched;		/* updated without locking */
287 };
288 
289 /*
290  * Need this for bootstrapping a per node allocator.
291  */
292 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
293 static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
294 #define	CACHE_CACHE 0
295 #define	SIZE_AC MAX_NUMNODES
296 #define	SIZE_L3 (2 * MAX_NUMNODES)
297 
298 static int drain_freelist(struct kmem_cache *cache,
299 			struct kmem_list3 *l3, int tofree);
300 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
301 			int node);
302 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
303 static void cache_reap(struct work_struct *unused);
304 
305 /*
306  * This function must be completely optimized away if a constant is passed to
307  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
308  */
309 static __always_inline int index_of(const size_t size)
310 {
311 	extern void __bad_size(void);
312 
313 	if (__builtin_constant_p(size)) {
314 		int i = 0;
315 
316 #define CACHE(x) \
317 	if (size <=x) \
318 		return i; \
319 	else \
320 		i++;
321 #include <linux/kmalloc_sizes.h>
322 #undef CACHE
323 		__bad_size();
324 	} else
325 		__bad_size();
326 	return 0;
327 }
328 
329 static int slab_early_init = 1;
330 
331 #define INDEX_AC index_of(sizeof(struct arraycache_init))
332 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
333 
334 static void kmem_list3_init(struct kmem_list3 *parent)
335 {
336 	INIT_LIST_HEAD(&parent->slabs_full);
337 	INIT_LIST_HEAD(&parent->slabs_partial);
338 	INIT_LIST_HEAD(&parent->slabs_free);
339 	parent->shared = NULL;
340 	parent->alien = NULL;
341 	parent->colour_next = 0;
342 	spin_lock_init(&parent->list_lock);
343 	parent->free_objects = 0;
344 	parent->free_touched = 0;
345 }
346 
347 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
348 	do {								\
349 		INIT_LIST_HEAD(listp);					\
350 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
351 	} while (0)
352 
353 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
354 	do {								\
355 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
356 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
357 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
358 	} while (0)
359 
360 #define CFLGS_OFF_SLAB		(0x80000000UL)
361 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
362 
363 #define BATCHREFILL_LIMIT	16
364 /*
365  * Optimization question: fewer reaps means less probability for unnessary
366  * cpucache drain/refill cycles.
367  *
368  * OTOH the cpuarrays can contain lots of objects,
369  * which could lock up otherwise freeable slabs.
370  */
371 #define REAPTIMEOUT_CPUC	(2*HZ)
372 #define REAPTIMEOUT_LIST3	(4*HZ)
373 
374 #if STATS
375 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
376 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
377 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
378 #define	STATS_INC_GROWN(x)	((x)->grown++)
379 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
380 #define	STATS_SET_HIGH(x)						\
381 	do {								\
382 		if ((x)->num_active > (x)->high_mark)			\
383 			(x)->high_mark = (x)->num_active;		\
384 	} while (0)
385 #define	STATS_INC_ERR(x)	((x)->errors++)
386 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
387 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
388 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
389 #define	STATS_SET_FREEABLE(x, i)					\
390 	do {								\
391 		if ((x)->max_freeable < i)				\
392 			(x)->max_freeable = i;				\
393 	} while (0)
394 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
395 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
396 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
397 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
398 #else
399 #define	STATS_INC_ACTIVE(x)	do { } while (0)
400 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
401 #define	STATS_INC_ALLOCED(x)	do { } while (0)
402 #define	STATS_INC_GROWN(x)	do { } while (0)
403 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
404 #define	STATS_SET_HIGH(x)	do { } while (0)
405 #define	STATS_INC_ERR(x)	do { } while (0)
406 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
407 #define	STATS_INC_NODEFREES(x)	do { } while (0)
408 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
409 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
410 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
411 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
412 #define STATS_INC_FREEHIT(x)	do { } while (0)
413 #define STATS_INC_FREEMISS(x)	do { } while (0)
414 #endif
415 
416 #if DEBUG
417 
418 /*
419  * memory layout of objects:
420  * 0		: objp
421  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
422  * 		the end of an object is aligned with the end of the real
423  * 		allocation. Catches writes behind the end of the allocation.
424  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
425  * 		redzone word.
426  * cachep->obj_offset: The real object.
427  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
428  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
429  *					[BYTES_PER_WORD long]
430  */
431 static int obj_offset(struct kmem_cache *cachep)
432 {
433 	return cachep->obj_offset;
434 }
435 
436 static int obj_size(struct kmem_cache *cachep)
437 {
438 	return cachep->obj_size;
439 }
440 
441 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
442 {
443 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
444 	return (unsigned long long*) (objp + obj_offset(cachep) -
445 				      sizeof(unsigned long long));
446 }
447 
448 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
449 {
450 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
451 	if (cachep->flags & SLAB_STORE_USER)
452 		return (unsigned long long *)(objp + cachep->buffer_size -
453 					      sizeof(unsigned long long) -
454 					      REDZONE_ALIGN);
455 	return (unsigned long long *) (objp + cachep->buffer_size -
456 				       sizeof(unsigned long long));
457 }
458 
459 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
460 {
461 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
462 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
463 }
464 
465 #else
466 
467 #define obj_offset(x)			0
468 #define obj_size(cachep)		(cachep->buffer_size)
469 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
470 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
471 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
472 
473 #endif
474 
475 #ifdef CONFIG_TRACING
476 size_t slab_buffer_size(struct kmem_cache *cachep)
477 {
478 	return cachep->buffer_size;
479 }
480 EXPORT_SYMBOL(slab_buffer_size);
481 #endif
482 
483 /*
484  * Do not go above this order unless 0 objects fit into the slab or
485  * overridden on the command line.
486  */
487 #define	SLAB_MAX_ORDER_HI	1
488 #define	SLAB_MAX_ORDER_LO	0
489 static int slab_max_order = SLAB_MAX_ORDER_LO;
490 static bool slab_max_order_set __initdata;
491 
492 /*
493  * Functions for storing/retrieving the cachep and or slab from the page
494  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
495  * these are used to find the cache which an obj belongs to.
496  */
497 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
498 {
499 	page->lru.next = (struct list_head *)cache;
500 }
501 
502 static inline struct kmem_cache *page_get_cache(struct page *page)
503 {
504 	page = compound_head(page);
505 	BUG_ON(!PageSlab(page));
506 	return (struct kmem_cache *)page->lru.next;
507 }
508 
509 static inline void page_set_slab(struct page *page, struct slab *slab)
510 {
511 	page->lru.prev = (struct list_head *)slab;
512 }
513 
514 static inline struct slab *page_get_slab(struct page *page)
515 {
516 	BUG_ON(!PageSlab(page));
517 	return (struct slab *)page->lru.prev;
518 }
519 
520 static inline struct kmem_cache *virt_to_cache(const void *obj)
521 {
522 	struct page *page = virt_to_head_page(obj);
523 	return page_get_cache(page);
524 }
525 
526 static inline struct slab *virt_to_slab(const void *obj)
527 {
528 	struct page *page = virt_to_head_page(obj);
529 	return page_get_slab(page);
530 }
531 
532 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
533 				 unsigned int idx)
534 {
535 	return slab->s_mem + cache->buffer_size * idx;
536 }
537 
538 /*
539  * We want to avoid an expensive divide : (offset / cache->buffer_size)
540  *   Using the fact that buffer_size is a constant for a particular cache,
541  *   we can replace (offset / cache->buffer_size) by
542  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
543  */
544 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
545 					const struct slab *slab, void *obj)
546 {
547 	u32 offset = (obj - slab->s_mem);
548 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
549 }
550 
551 /*
552  * These are the default caches for kmalloc. Custom caches can have other sizes.
553  */
554 struct cache_sizes malloc_sizes[] = {
555 #define CACHE(x) { .cs_size = (x) },
556 #include <linux/kmalloc_sizes.h>
557 	CACHE(ULONG_MAX)
558 #undef CACHE
559 };
560 EXPORT_SYMBOL(malloc_sizes);
561 
562 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
563 struct cache_names {
564 	char *name;
565 	char *name_dma;
566 };
567 
568 static struct cache_names __initdata cache_names[] = {
569 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
570 #include <linux/kmalloc_sizes.h>
571 	{NULL,}
572 #undef CACHE
573 };
574 
575 static struct arraycache_init initarray_cache __initdata =
576     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
577 static struct arraycache_init initarray_generic =
578     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
579 
580 /* internal cache of cache description objs */
581 static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
582 static struct kmem_cache cache_cache = {
583 	.nodelists = cache_cache_nodelists,
584 	.batchcount = 1,
585 	.limit = BOOT_CPUCACHE_ENTRIES,
586 	.shared = 1,
587 	.buffer_size = sizeof(struct kmem_cache),
588 	.name = "kmem_cache",
589 };
590 
591 #define BAD_ALIEN_MAGIC 0x01020304ul
592 
593 /*
594  * chicken and egg problem: delay the per-cpu array allocation
595  * until the general caches are up.
596  */
597 static enum {
598 	NONE,
599 	PARTIAL_AC,
600 	PARTIAL_L3,
601 	EARLY,
602 	LATE,
603 	FULL
604 } g_cpucache_up;
605 
606 /*
607  * used by boot code to determine if it can use slab based allocator
608  */
609 int slab_is_available(void)
610 {
611 	return g_cpucache_up >= EARLY;
612 }
613 
614 #ifdef CONFIG_LOCKDEP
615 
616 /*
617  * Slab sometimes uses the kmalloc slabs to store the slab headers
618  * for other slabs "off slab".
619  * The locking for this is tricky in that it nests within the locks
620  * of all other slabs in a few places; to deal with this special
621  * locking we put on-slab caches into a separate lock-class.
622  *
623  * We set lock class for alien array caches which are up during init.
624  * The lock annotation will be lost if all cpus of a node goes down and
625  * then comes back up during hotplug
626  */
627 static struct lock_class_key on_slab_l3_key;
628 static struct lock_class_key on_slab_alc_key;
629 
630 static struct lock_class_key debugobj_l3_key;
631 static struct lock_class_key debugobj_alc_key;
632 
633 static void slab_set_lock_classes(struct kmem_cache *cachep,
634 		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
635 		int q)
636 {
637 	struct array_cache **alc;
638 	struct kmem_list3 *l3;
639 	int r;
640 
641 	l3 = cachep->nodelists[q];
642 	if (!l3)
643 		return;
644 
645 	lockdep_set_class(&l3->list_lock, l3_key);
646 	alc = l3->alien;
647 	/*
648 	 * FIXME: This check for BAD_ALIEN_MAGIC
649 	 * should go away when common slab code is taught to
650 	 * work even without alien caches.
651 	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
652 	 * for alloc_alien_cache,
653 	 */
654 	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
655 		return;
656 	for_each_node(r) {
657 		if (alc[r])
658 			lockdep_set_class(&alc[r]->lock, alc_key);
659 	}
660 }
661 
662 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
663 {
664 	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
665 }
666 
667 static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
668 {
669 	int node;
670 
671 	for_each_online_node(node)
672 		slab_set_debugobj_lock_classes_node(cachep, node);
673 }
674 
675 static void init_node_lock_keys(int q)
676 {
677 	struct cache_sizes *s = malloc_sizes;
678 
679 	if (g_cpucache_up < LATE)
680 		return;
681 
682 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
683 		struct kmem_list3 *l3;
684 
685 		l3 = s->cs_cachep->nodelists[q];
686 		if (!l3 || OFF_SLAB(s->cs_cachep))
687 			continue;
688 
689 		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
690 				&on_slab_alc_key, q);
691 	}
692 }
693 
694 static inline void init_lock_keys(void)
695 {
696 	int node;
697 
698 	for_each_node(node)
699 		init_node_lock_keys(node);
700 }
701 #else
702 static void init_node_lock_keys(int q)
703 {
704 }
705 
706 static inline void init_lock_keys(void)
707 {
708 }
709 
710 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
711 {
712 }
713 
714 static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
715 {
716 }
717 #endif
718 
719 /*
720  * Guard access to the cache-chain.
721  */
722 static DEFINE_MUTEX(cache_chain_mutex);
723 static struct list_head cache_chain;
724 
725 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
726 
727 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
728 {
729 	return cachep->array[smp_processor_id()];
730 }
731 
732 static inline struct kmem_cache *__find_general_cachep(size_t size,
733 							gfp_t gfpflags)
734 {
735 	struct cache_sizes *csizep = malloc_sizes;
736 
737 #if DEBUG
738 	/* This happens if someone tries to call
739 	 * kmem_cache_create(), or __kmalloc(), before
740 	 * the generic caches are initialized.
741 	 */
742 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
743 #endif
744 	if (!size)
745 		return ZERO_SIZE_PTR;
746 
747 	while (size > csizep->cs_size)
748 		csizep++;
749 
750 	/*
751 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
752 	 * has cs_{dma,}cachep==NULL. Thus no special case
753 	 * for large kmalloc calls required.
754 	 */
755 #ifdef CONFIG_ZONE_DMA
756 	if (unlikely(gfpflags & GFP_DMA))
757 		return csizep->cs_dmacachep;
758 #endif
759 	return csizep->cs_cachep;
760 }
761 
762 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
763 {
764 	return __find_general_cachep(size, gfpflags);
765 }
766 
767 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
768 {
769 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
770 }
771 
772 /*
773  * Calculate the number of objects and left-over bytes for a given buffer size.
774  */
775 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
776 			   size_t align, int flags, size_t *left_over,
777 			   unsigned int *num)
778 {
779 	int nr_objs;
780 	size_t mgmt_size;
781 	size_t slab_size = PAGE_SIZE << gfporder;
782 
783 	/*
784 	 * The slab management structure can be either off the slab or
785 	 * on it. For the latter case, the memory allocated for a
786 	 * slab is used for:
787 	 *
788 	 * - The struct slab
789 	 * - One kmem_bufctl_t for each object
790 	 * - Padding to respect alignment of @align
791 	 * - @buffer_size bytes for each object
792 	 *
793 	 * If the slab management structure is off the slab, then the
794 	 * alignment will already be calculated into the size. Because
795 	 * the slabs are all pages aligned, the objects will be at the
796 	 * correct alignment when allocated.
797 	 */
798 	if (flags & CFLGS_OFF_SLAB) {
799 		mgmt_size = 0;
800 		nr_objs = slab_size / buffer_size;
801 
802 		if (nr_objs > SLAB_LIMIT)
803 			nr_objs = SLAB_LIMIT;
804 	} else {
805 		/*
806 		 * Ignore padding for the initial guess. The padding
807 		 * is at most @align-1 bytes, and @buffer_size is at
808 		 * least @align. In the worst case, this result will
809 		 * be one greater than the number of objects that fit
810 		 * into the memory allocation when taking the padding
811 		 * into account.
812 		 */
813 		nr_objs = (slab_size - sizeof(struct slab)) /
814 			  (buffer_size + sizeof(kmem_bufctl_t));
815 
816 		/*
817 		 * This calculated number will be either the right
818 		 * amount, or one greater than what we want.
819 		 */
820 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
821 		       > slab_size)
822 			nr_objs--;
823 
824 		if (nr_objs > SLAB_LIMIT)
825 			nr_objs = SLAB_LIMIT;
826 
827 		mgmt_size = slab_mgmt_size(nr_objs, align);
828 	}
829 	*num = nr_objs;
830 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
831 }
832 
833 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
834 
835 static void __slab_error(const char *function, struct kmem_cache *cachep,
836 			char *msg)
837 {
838 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
839 	       function, cachep->name, msg);
840 	dump_stack();
841 }
842 
843 /*
844  * By default on NUMA we use alien caches to stage the freeing of
845  * objects allocated from other nodes. This causes massive memory
846  * inefficiencies when using fake NUMA setup to split memory into a
847  * large number of small nodes, so it can be disabled on the command
848  * line
849   */
850 
851 static int use_alien_caches __read_mostly = 1;
852 static int __init noaliencache_setup(char *s)
853 {
854 	use_alien_caches = 0;
855 	return 1;
856 }
857 __setup("noaliencache", noaliencache_setup);
858 
859 static int __init slab_max_order_setup(char *str)
860 {
861 	get_option(&str, &slab_max_order);
862 	slab_max_order = slab_max_order < 0 ? 0 :
863 				min(slab_max_order, MAX_ORDER - 1);
864 	slab_max_order_set = true;
865 
866 	return 1;
867 }
868 __setup("slab_max_order=", slab_max_order_setup);
869 
870 #ifdef CONFIG_NUMA
871 /*
872  * Special reaping functions for NUMA systems called from cache_reap().
873  * These take care of doing round robin flushing of alien caches (containing
874  * objects freed on different nodes from which they were allocated) and the
875  * flushing of remote pcps by calling drain_node_pages.
876  */
877 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
878 
879 static void init_reap_node(int cpu)
880 {
881 	int node;
882 
883 	node = next_node(cpu_to_mem(cpu), node_online_map);
884 	if (node == MAX_NUMNODES)
885 		node = first_node(node_online_map);
886 
887 	per_cpu(slab_reap_node, cpu) = node;
888 }
889 
890 static void next_reap_node(void)
891 {
892 	int node = __this_cpu_read(slab_reap_node);
893 
894 	node = next_node(node, node_online_map);
895 	if (unlikely(node >= MAX_NUMNODES))
896 		node = first_node(node_online_map);
897 	__this_cpu_write(slab_reap_node, node);
898 }
899 
900 #else
901 #define init_reap_node(cpu) do { } while (0)
902 #define next_reap_node(void) do { } while (0)
903 #endif
904 
905 /*
906  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
907  * via the workqueue/eventd.
908  * Add the CPU number into the expiration time to minimize the possibility of
909  * the CPUs getting into lockstep and contending for the global cache chain
910  * lock.
911  */
912 static void __cpuinit start_cpu_timer(int cpu)
913 {
914 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
915 
916 	/*
917 	 * When this gets called from do_initcalls via cpucache_init(),
918 	 * init_workqueues() has already run, so keventd will be setup
919 	 * at that time.
920 	 */
921 	if (keventd_up() && reap_work->work.func == NULL) {
922 		init_reap_node(cpu);
923 		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
924 		schedule_delayed_work_on(cpu, reap_work,
925 					__round_jiffies_relative(HZ, cpu));
926 	}
927 }
928 
929 static struct array_cache *alloc_arraycache(int node, int entries,
930 					    int batchcount, gfp_t gfp)
931 {
932 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
933 	struct array_cache *nc = NULL;
934 
935 	nc = kmalloc_node(memsize, gfp, node);
936 	/*
937 	 * The array_cache structures contain pointers to free object.
938 	 * However, when such objects are allocated or transferred to another
939 	 * cache the pointers are not cleared and they could be counted as
940 	 * valid references during a kmemleak scan. Therefore, kmemleak must
941 	 * not scan such objects.
942 	 */
943 	kmemleak_no_scan(nc);
944 	if (nc) {
945 		nc->avail = 0;
946 		nc->limit = entries;
947 		nc->batchcount = batchcount;
948 		nc->touched = 0;
949 		spin_lock_init(&nc->lock);
950 	}
951 	return nc;
952 }
953 
954 /*
955  * Transfer objects in one arraycache to another.
956  * Locking must be handled by the caller.
957  *
958  * Return the number of entries transferred.
959  */
960 static int transfer_objects(struct array_cache *to,
961 		struct array_cache *from, unsigned int max)
962 {
963 	/* Figure out how many entries to transfer */
964 	int nr = min3(from->avail, max, to->limit - to->avail);
965 
966 	if (!nr)
967 		return 0;
968 
969 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
970 			sizeof(void *) *nr);
971 
972 	from->avail -= nr;
973 	to->avail += nr;
974 	return nr;
975 }
976 
977 #ifndef CONFIG_NUMA
978 
979 #define drain_alien_cache(cachep, alien) do { } while (0)
980 #define reap_alien(cachep, l3) do { } while (0)
981 
982 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
983 {
984 	return (struct array_cache **)BAD_ALIEN_MAGIC;
985 }
986 
987 static inline void free_alien_cache(struct array_cache **ac_ptr)
988 {
989 }
990 
991 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
992 {
993 	return 0;
994 }
995 
996 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
997 		gfp_t flags)
998 {
999 	return NULL;
1000 }
1001 
1002 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1003 		 gfp_t flags, int nodeid)
1004 {
1005 	return NULL;
1006 }
1007 
1008 #else	/* CONFIG_NUMA */
1009 
1010 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1011 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1012 
1013 static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
1014 {
1015 	struct array_cache **ac_ptr;
1016 	int memsize = sizeof(void *) * nr_node_ids;
1017 	int i;
1018 
1019 	if (limit > 1)
1020 		limit = 12;
1021 	ac_ptr = kzalloc_node(memsize, gfp, node);
1022 	if (ac_ptr) {
1023 		for_each_node(i) {
1024 			if (i == node || !node_online(i))
1025 				continue;
1026 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
1027 			if (!ac_ptr[i]) {
1028 				for (i--; i >= 0; i--)
1029 					kfree(ac_ptr[i]);
1030 				kfree(ac_ptr);
1031 				return NULL;
1032 			}
1033 		}
1034 	}
1035 	return ac_ptr;
1036 }
1037 
1038 static void free_alien_cache(struct array_cache **ac_ptr)
1039 {
1040 	int i;
1041 
1042 	if (!ac_ptr)
1043 		return;
1044 	for_each_node(i)
1045 	    kfree(ac_ptr[i]);
1046 	kfree(ac_ptr);
1047 }
1048 
1049 static void __drain_alien_cache(struct kmem_cache *cachep,
1050 				struct array_cache *ac, int node)
1051 {
1052 	struct kmem_list3 *rl3 = cachep->nodelists[node];
1053 
1054 	if (ac->avail) {
1055 		spin_lock(&rl3->list_lock);
1056 		/*
1057 		 * Stuff objects into the remote nodes shared array first.
1058 		 * That way we could avoid the overhead of putting the objects
1059 		 * into the free lists and getting them back later.
1060 		 */
1061 		if (rl3->shared)
1062 			transfer_objects(rl3->shared, ac, ac->limit);
1063 
1064 		free_block(cachep, ac->entry, ac->avail, node);
1065 		ac->avail = 0;
1066 		spin_unlock(&rl3->list_lock);
1067 	}
1068 }
1069 
1070 /*
1071  * Called from cache_reap() to regularly drain alien caches round robin.
1072  */
1073 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1074 {
1075 	int node = __this_cpu_read(slab_reap_node);
1076 
1077 	if (l3->alien) {
1078 		struct array_cache *ac = l3->alien[node];
1079 
1080 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1081 			__drain_alien_cache(cachep, ac, node);
1082 			spin_unlock_irq(&ac->lock);
1083 		}
1084 	}
1085 }
1086 
1087 static void drain_alien_cache(struct kmem_cache *cachep,
1088 				struct array_cache **alien)
1089 {
1090 	int i = 0;
1091 	struct array_cache *ac;
1092 	unsigned long flags;
1093 
1094 	for_each_online_node(i) {
1095 		ac = alien[i];
1096 		if (ac) {
1097 			spin_lock_irqsave(&ac->lock, flags);
1098 			__drain_alien_cache(cachep, ac, i);
1099 			spin_unlock_irqrestore(&ac->lock, flags);
1100 		}
1101 	}
1102 }
1103 
1104 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1105 {
1106 	struct slab *slabp = virt_to_slab(objp);
1107 	int nodeid = slabp->nodeid;
1108 	struct kmem_list3 *l3;
1109 	struct array_cache *alien = NULL;
1110 	int node;
1111 
1112 	node = numa_mem_id();
1113 
1114 	/*
1115 	 * Make sure we are not freeing a object from another node to the array
1116 	 * cache on this cpu.
1117 	 */
1118 	if (likely(slabp->nodeid == node))
1119 		return 0;
1120 
1121 	l3 = cachep->nodelists[node];
1122 	STATS_INC_NODEFREES(cachep);
1123 	if (l3->alien && l3->alien[nodeid]) {
1124 		alien = l3->alien[nodeid];
1125 		spin_lock(&alien->lock);
1126 		if (unlikely(alien->avail == alien->limit)) {
1127 			STATS_INC_ACOVERFLOW(cachep);
1128 			__drain_alien_cache(cachep, alien, nodeid);
1129 		}
1130 		alien->entry[alien->avail++] = objp;
1131 		spin_unlock(&alien->lock);
1132 	} else {
1133 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1134 		free_block(cachep, &objp, 1, nodeid);
1135 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1136 	}
1137 	return 1;
1138 }
1139 #endif
1140 
1141 /*
1142  * Allocates and initializes nodelists for a node on each slab cache, used for
1143  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
1144  * will be allocated off-node since memory is not yet online for the new node.
1145  * When hotplugging memory or a cpu, existing nodelists are not replaced if
1146  * already in use.
1147  *
1148  * Must hold cache_chain_mutex.
1149  */
1150 static int init_cache_nodelists_node(int node)
1151 {
1152 	struct kmem_cache *cachep;
1153 	struct kmem_list3 *l3;
1154 	const int memsize = sizeof(struct kmem_list3);
1155 
1156 	list_for_each_entry(cachep, &cache_chain, next) {
1157 		/*
1158 		 * Set up the size64 kmemlist for cpu before we can
1159 		 * begin anything. Make sure some other cpu on this
1160 		 * node has not already allocated this
1161 		 */
1162 		if (!cachep->nodelists[node]) {
1163 			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1164 			if (!l3)
1165 				return -ENOMEM;
1166 			kmem_list3_init(l3);
1167 			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1168 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1169 
1170 			/*
1171 			 * The l3s don't come and go as CPUs come and
1172 			 * go.  cache_chain_mutex is sufficient
1173 			 * protection here.
1174 			 */
1175 			cachep->nodelists[node] = l3;
1176 		}
1177 
1178 		spin_lock_irq(&cachep->nodelists[node]->list_lock);
1179 		cachep->nodelists[node]->free_limit =
1180 			(1 + nr_cpus_node(node)) *
1181 			cachep->batchcount + cachep->num;
1182 		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1183 	}
1184 	return 0;
1185 }
1186 
1187 static void __cpuinit cpuup_canceled(long cpu)
1188 {
1189 	struct kmem_cache *cachep;
1190 	struct kmem_list3 *l3 = NULL;
1191 	int node = cpu_to_mem(cpu);
1192 	const struct cpumask *mask = cpumask_of_node(node);
1193 
1194 	list_for_each_entry(cachep, &cache_chain, next) {
1195 		struct array_cache *nc;
1196 		struct array_cache *shared;
1197 		struct array_cache **alien;
1198 
1199 		/* cpu is dead; no one can alloc from it. */
1200 		nc = cachep->array[cpu];
1201 		cachep->array[cpu] = NULL;
1202 		l3 = cachep->nodelists[node];
1203 
1204 		if (!l3)
1205 			goto free_array_cache;
1206 
1207 		spin_lock_irq(&l3->list_lock);
1208 
1209 		/* Free limit for this kmem_list3 */
1210 		l3->free_limit -= cachep->batchcount;
1211 		if (nc)
1212 			free_block(cachep, nc->entry, nc->avail, node);
1213 
1214 		if (!cpumask_empty(mask)) {
1215 			spin_unlock_irq(&l3->list_lock);
1216 			goto free_array_cache;
1217 		}
1218 
1219 		shared = l3->shared;
1220 		if (shared) {
1221 			free_block(cachep, shared->entry,
1222 				   shared->avail, node);
1223 			l3->shared = NULL;
1224 		}
1225 
1226 		alien = l3->alien;
1227 		l3->alien = NULL;
1228 
1229 		spin_unlock_irq(&l3->list_lock);
1230 
1231 		kfree(shared);
1232 		if (alien) {
1233 			drain_alien_cache(cachep, alien);
1234 			free_alien_cache(alien);
1235 		}
1236 free_array_cache:
1237 		kfree(nc);
1238 	}
1239 	/*
1240 	 * In the previous loop, all the objects were freed to
1241 	 * the respective cache's slabs,  now we can go ahead and
1242 	 * shrink each nodelist to its limit.
1243 	 */
1244 	list_for_each_entry(cachep, &cache_chain, next) {
1245 		l3 = cachep->nodelists[node];
1246 		if (!l3)
1247 			continue;
1248 		drain_freelist(cachep, l3, l3->free_objects);
1249 	}
1250 }
1251 
1252 static int __cpuinit cpuup_prepare(long cpu)
1253 {
1254 	struct kmem_cache *cachep;
1255 	struct kmem_list3 *l3 = NULL;
1256 	int node = cpu_to_mem(cpu);
1257 	int err;
1258 
1259 	/*
1260 	 * We need to do this right in the beginning since
1261 	 * alloc_arraycache's are going to use this list.
1262 	 * kmalloc_node allows us to add the slab to the right
1263 	 * kmem_list3 and not this cpu's kmem_list3
1264 	 */
1265 	err = init_cache_nodelists_node(node);
1266 	if (err < 0)
1267 		goto bad;
1268 
1269 	/*
1270 	 * Now we can go ahead with allocating the shared arrays and
1271 	 * array caches
1272 	 */
1273 	list_for_each_entry(cachep, &cache_chain, next) {
1274 		struct array_cache *nc;
1275 		struct array_cache *shared = NULL;
1276 		struct array_cache **alien = NULL;
1277 
1278 		nc = alloc_arraycache(node, cachep->limit,
1279 					cachep->batchcount, GFP_KERNEL);
1280 		if (!nc)
1281 			goto bad;
1282 		if (cachep->shared) {
1283 			shared = alloc_arraycache(node,
1284 				cachep->shared * cachep->batchcount,
1285 				0xbaadf00d, GFP_KERNEL);
1286 			if (!shared) {
1287 				kfree(nc);
1288 				goto bad;
1289 			}
1290 		}
1291 		if (use_alien_caches) {
1292 			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1293 			if (!alien) {
1294 				kfree(shared);
1295 				kfree(nc);
1296 				goto bad;
1297 			}
1298 		}
1299 		cachep->array[cpu] = nc;
1300 		l3 = cachep->nodelists[node];
1301 		BUG_ON(!l3);
1302 
1303 		spin_lock_irq(&l3->list_lock);
1304 		if (!l3->shared) {
1305 			/*
1306 			 * We are serialised from CPU_DEAD or
1307 			 * CPU_UP_CANCELLED by the cpucontrol lock
1308 			 */
1309 			l3->shared = shared;
1310 			shared = NULL;
1311 		}
1312 #ifdef CONFIG_NUMA
1313 		if (!l3->alien) {
1314 			l3->alien = alien;
1315 			alien = NULL;
1316 		}
1317 #endif
1318 		spin_unlock_irq(&l3->list_lock);
1319 		kfree(shared);
1320 		free_alien_cache(alien);
1321 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
1322 			slab_set_debugobj_lock_classes_node(cachep, node);
1323 	}
1324 	init_node_lock_keys(node);
1325 
1326 	return 0;
1327 bad:
1328 	cpuup_canceled(cpu);
1329 	return -ENOMEM;
1330 }
1331 
1332 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1333 				    unsigned long action, void *hcpu)
1334 {
1335 	long cpu = (long)hcpu;
1336 	int err = 0;
1337 
1338 	switch (action) {
1339 	case CPU_UP_PREPARE:
1340 	case CPU_UP_PREPARE_FROZEN:
1341 		mutex_lock(&cache_chain_mutex);
1342 		err = cpuup_prepare(cpu);
1343 		mutex_unlock(&cache_chain_mutex);
1344 		break;
1345 	case CPU_ONLINE:
1346 	case CPU_ONLINE_FROZEN:
1347 		start_cpu_timer(cpu);
1348 		break;
1349 #ifdef CONFIG_HOTPLUG_CPU
1350   	case CPU_DOWN_PREPARE:
1351   	case CPU_DOWN_PREPARE_FROZEN:
1352 		/*
1353 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1354 		 * held so that if cache_reap() is invoked it cannot do
1355 		 * anything expensive but will only modify reap_work
1356 		 * and reschedule the timer.
1357 		*/
1358 		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1359 		/* Now the cache_reaper is guaranteed to be not running. */
1360 		per_cpu(slab_reap_work, cpu).work.func = NULL;
1361   		break;
1362   	case CPU_DOWN_FAILED:
1363   	case CPU_DOWN_FAILED_FROZEN:
1364 		start_cpu_timer(cpu);
1365   		break;
1366 	case CPU_DEAD:
1367 	case CPU_DEAD_FROZEN:
1368 		/*
1369 		 * Even if all the cpus of a node are down, we don't free the
1370 		 * kmem_list3 of any cache. This to avoid a race between
1371 		 * cpu_down, and a kmalloc allocation from another cpu for
1372 		 * memory from the node of the cpu going down.  The list3
1373 		 * structure is usually allocated from kmem_cache_create() and
1374 		 * gets destroyed at kmem_cache_destroy().
1375 		 */
1376 		/* fall through */
1377 #endif
1378 	case CPU_UP_CANCELED:
1379 	case CPU_UP_CANCELED_FROZEN:
1380 		mutex_lock(&cache_chain_mutex);
1381 		cpuup_canceled(cpu);
1382 		mutex_unlock(&cache_chain_mutex);
1383 		break;
1384 	}
1385 	return notifier_from_errno(err);
1386 }
1387 
1388 static struct notifier_block __cpuinitdata cpucache_notifier = {
1389 	&cpuup_callback, NULL, 0
1390 };
1391 
1392 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1393 /*
1394  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1395  * Returns -EBUSY if all objects cannot be drained so that the node is not
1396  * removed.
1397  *
1398  * Must hold cache_chain_mutex.
1399  */
1400 static int __meminit drain_cache_nodelists_node(int node)
1401 {
1402 	struct kmem_cache *cachep;
1403 	int ret = 0;
1404 
1405 	list_for_each_entry(cachep, &cache_chain, next) {
1406 		struct kmem_list3 *l3;
1407 
1408 		l3 = cachep->nodelists[node];
1409 		if (!l3)
1410 			continue;
1411 
1412 		drain_freelist(cachep, l3, l3->free_objects);
1413 
1414 		if (!list_empty(&l3->slabs_full) ||
1415 		    !list_empty(&l3->slabs_partial)) {
1416 			ret = -EBUSY;
1417 			break;
1418 		}
1419 	}
1420 	return ret;
1421 }
1422 
1423 static int __meminit slab_memory_callback(struct notifier_block *self,
1424 					unsigned long action, void *arg)
1425 {
1426 	struct memory_notify *mnb = arg;
1427 	int ret = 0;
1428 	int nid;
1429 
1430 	nid = mnb->status_change_nid;
1431 	if (nid < 0)
1432 		goto out;
1433 
1434 	switch (action) {
1435 	case MEM_GOING_ONLINE:
1436 		mutex_lock(&cache_chain_mutex);
1437 		ret = init_cache_nodelists_node(nid);
1438 		mutex_unlock(&cache_chain_mutex);
1439 		break;
1440 	case MEM_GOING_OFFLINE:
1441 		mutex_lock(&cache_chain_mutex);
1442 		ret = drain_cache_nodelists_node(nid);
1443 		mutex_unlock(&cache_chain_mutex);
1444 		break;
1445 	case MEM_ONLINE:
1446 	case MEM_OFFLINE:
1447 	case MEM_CANCEL_ONLINE:
1448 	case MEM_CANCEL_OFFLINE:
1449 		break;
1450 	}
1451 out:
1452 	return notifier_from_errno(ret);
1453 }
1454 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1455 
1456 /*
1457  * swap the static kmem_list3 with kmalloced memory
1458  */
1459 static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1460 				int nodeid)
1461 {
1462 	struct kmem_list3 *ptr;
1463 
1464 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
1465 	BUG_ON(!ptr);
1466 
1467 	memcpy(ptr, list, sizeof(struct kmem_list3));
1468 	/*
1469 	 * Do not assume that spinlocks can be initialized via memcpy:
1470 	 */
1471 	spin_lock_init(&ptr->list_lock);
1472 
1473 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1474 	cachep->nodelists[nodeid] = ptr;
1475 }
1476 
1477 /*
1478  * For setting up all the kmem_list3s for cache whose buffer_size is same as
1479  * size of kmem_list3.
1480  */
1481 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1482 {
1483 	int node;
1484 
1485 	for_each_online_node(node) {
1486 		cachep->nodelists[node] = &initkmem_list3[index + node];
1487 		cachep->nodelists[node]->next_reap = jiffies +
1488 		    REAPTIMEOUT_LIST3 +
1489 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1490 	}
1491 }
1492 
1493 /*
1494  * Initialisation.  Called after the page allocator have been initialised and
1495  * before smp_init().
1496  */
1497 void __init kmem_cache_init(void)
1498 {
1499 	size_t left_over;
1500 	struct cache_sizes *sizes;
1501 	struct cache_names *names;
1502 	int i;
1503 	int order;
1504 	int node;
1505 
1506 	if (num_possible_nodes() == 1)
1507 		use_alien_caches = 0;
1508 
1509 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1510 		kmem_list3_init(&initkmem_list3[i]);
1511 		if (i < MAX_NUMNODES)
1512 			cache_cache.nodelists[i] = NULL;
1513 	}
1514 	set_up_list3s(&cache_cache, CACHE_CACHE);
1515 
1516 	/*
1517 	 * Fragmentation resistance on low memory - only use bigger
1518 	 * page orders on machines with more than 32MB of memory if
1519 	 * not overridden on the command line.
1520 	 */
1521 	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1522 		slab_max_order = SLAB_MAX_ORDER_HI;
1523 
1524 	/* Bootstrap is tricky, because several objects are allocated
1525 	 * from caches that do not exist yet:
1526 	 * 1) initialize the cache_cache cache: it contains the struct
1527 	 *    kmem_cache structures of all caches, except cache_cache itself:
1528 	 *    cache_cache is statically allocated.
1529 	 *    Initially an __init data area is used for the head array and the
1530 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1531 	 *    array at the end of the bootstrap.
1532 	 * 2) Create the first kmalloc cache.
1533 	 *    The struct kmem_cache for the new cache is allocated normally.
1534 	 *    An __init data area is used for the head array.
1535 	 * 3) Create the remaining kmalloc caches, with minimally sized
1536 	 *    head arrays.
1537 	 * 4) Replace the __init data head arrays for cache_cache and the first
1538 	 *    kmalloc cache with kmalloc allocated arrays.
1539 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1540 	 *    the other cache's with kmalloc allocated memory.
1541 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1542 	 */
1543 
1544 	node = numa_mem_id();
1545 
1546 	/* 1) create the cache_cache */
1547 	INIT_LIST_HEAD(&cache_chain);
1548 	list_add(&cache_cache.next, &cache_chain);
1549 	cache_cache.colour_off = cache_line_size();
1550 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1551 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1552 
1553 	/*
1554 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1555 	 */
1556 	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1557 				  nr_node_ids * sizeof(struct kmem_list3 *);
1558 #if DEBUG
1559 	cache_cache.obj_size = cache_cache.buffer_size;
1560 #endif
1561 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1562 					cache_line_size());
1563 	cache_cache.reciprocal_buffer_size =
1564 		reciprocal_value(cache_cache.buffer_size);
1565 
1566 	for (order = 0; order < MAX_ORDER; order++) {
1567 		cache_estimate(order, cache_cache.buffer_size,
1568 			cache_line_size(), 0, &left_over, &cache_cache.num);
1569 		if (cache_cache.num)
1570 			break;
1571 	}
1572 	BUG_ON(!cache_cache.num);
1573 	cache_cache.gfporder = order;
1574 	cache_cache.colour = left_over / cache_cache.colour_off;
1575 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1576 				      sizeof(struct slab), cache_line_size());
1577 
1578 	/* 2+3) create the kmalloc caches */
1579 	sizes = malloc_sizes;
1580 	names = cache_names;
1581 
1582 	/*
1583 	 * Initialize the caches that provide memory for the array cache and the
1584 	 * kmem_list3 structures first.  Without this, further allocations will
1585 	 * bug.
1586 	 */
1587 
1588 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1589 					sizes[INDEX_AC].cs_size,
1590 					ARCH_KMALLOC_MINALIGN,
1591 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1592 					NULL);
1593 
1594 	if (INDEX_AC != INDEX_L3) {
1595 		sizes[INDEX_L3].cs_cachep =
1596 			kmem_cache_create(names[INDEX_L3].name,
1597 				sizes[INDEX_L3].cs_size,
1598 				ARCH_KMALLOC_MINALIGN,
1599 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1600 				NULL);
1601 	}
1602 
1603 	slab_early_init = 0;
1604 
1605 	while (sizes->cs_size != ULONG_MAX) {
1606 		/*
1607 		 * For performance, all the general caches are L1 aligned.
1608 		 * This should be particularly beneficial on SMP boxes, as it
1609 		 * eliminates "false sharing".
1610 		 * Note for systems short on memory removing the alignment will
1611 		 * allow tighter packing of the smaller caches.
1612 		 */
1613 		if (!sizes->cs_cachep) {
1614 			sizes->cs_cachep = kmem_cache_create(names->name,
1615 					sizes->cs_size,
1616 					ARCH_KMALLOC_MINALIGN,
1617 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1618 					NULL);
1619 		}
1620 #ifdef CONFIG_ZONE_DMA
1621 		sizes->cs_dmacachep = kmem_cache_create(
1622 					names->name_dma,
1623 					sizes->cs_size,
1624 					ARCH_KMALLOC_MINALIGN,
1625 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1626 						SLAB_PANIC,
1627 					NULL);
1628 #endif
1629 		sizes++;
1630 		names++;
1631 	}
1632 	/* 4) Replace the bootstrap head arrays */
1633 	{
1634 		struct array_cache *ptr;
1635 
1636 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1637 
1638 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1639 		memcpy(ptr, cpu_cache_get(&cache_cache),
1640 		       sizeof(struct arraycache_init));
1641 		/*
1642 		 * Do not assume that spinlocks can be initialized via memcpy:
1643 		 */
1644 		spin_lock_init(&ptr->lock);
1645 
1646 		cache_cache.array[smp_processor_id()] = ptr;
1647 
1648 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1649 
1650 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1651 		       != &initarray_generic.cache);
1652 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1653 		       sizeof(struct arraycache_init));
1654 		/*
1655 		 * Do not assume that spinlocks can be initialized via memcpy:
1656 		 */
1657 		spin_lock_init(&ptr->lock);
1658 
1659 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1660 		    ptr;
1661 	}
1662 	/* 5) Replace the bootstrap kmem_list3's */
1663 	{
1664 		int nid;
1665 
1666 		for_each_online_node(nid) {
1667 			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1668 
1669 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1670 				  &initkmem_list3[SIZE_AC + nid], nid);
1671 
1672 			if (INDEX_AC != INDEX_L3) {
1673 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1674 					  &initkmem_list3[SIZE_L3 + nid], nid);
1675 			}
1676 		}
1677 	}
1678 
1679 	g_cpucache_up = EARLY;
1680 }
1681 
1682 void __init kmem_cache_init_late(void)
1683 {
1684 	struct kmem_cache *cachep;
1685 
1686 	g_cpucache_up = LATE;
1687 
1688 	/* Annotate slab for lockdep -- annotate the malloc caches */
1689 	init_lock_keys();
1690 
1691 	/* 6) resize the head arrays to their final sizes */
1692 	mutex_lock(&cache_chain_mutex);
1693 	list_for_each_entry(cachep, &cache_chain, next)
1694 		if (enable_cpucache(cachep, GFP_NOWAIT))
1695 			BUG();
1696 	mutex_unlock(&cache_chain_mutex);
1697 
1698 	/* Done! */
1699 	g_cpucache_up = FULL;
1700 
1701 	/*
1702 	 * Register a cpu startup notifier callback that initializes
1703 	 * cpu_cache_get for all new cpus
1704 	 */
1705 	register_cpu_notifier(&cpucache_notifier);
1706 
1707 #ifdef CONFIG_NUMA
1708 	/*
1709 	 * Register a memory hotplug callback that initializes and frees
1710 	 * nodelists.
1711 	 */
1712 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1713 #endif
1714 
1715 	/*
1716 	 * The reap timers are started later, with a module init call: That part
1717 	 * of the kernel is not yet operational.
1718 	 */
1719 }
1720 
1721 static int __init cpucache_init(void)
1722 {
1723 	int cpu;
1724 
1725 	/*
1726 	 * Register the timers that return unneeded pages to the page allocator
1727 	 */
1728 	for_each_online_cpu(cpu)
1729 		start_cpu_timer(cpu);
1730 	return 0;
1731 }
1732 __initcall(cpucache_init);
1733 
1734 static noinline void
1735 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1736 {
1737 	struct kmem_list3 *l3;
1738 	struct slab *slabp;
1739 	unsigned long flags;
1740 	int node;
1741 
1742 	printk(KERN_WARNING
1743 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1744 		nodeid, gfpflags);
1745 	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1746 		cachep->name, cachep->buffer_size, cachep->gfporder);
1747 
1748 	for_each_online_node(node) {
1749 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1750 		unsigned long active_slabs = 0, num_slabs = 0;
1751 
1752 		l3 = cachep->nodelists[node];
1753 		if (!l3)
1754 			continue;
1755 
1756 		spin_lock_irqsave(&l3->list_lock, flags);
1757 		list_for_each_entry(slabp, &l3->slabs_full, list) {
1758 			active_objs += cachep->num;
1759 			active_slabs++;
1760 		}
1761 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
1762 			active_objs += slabp->inuse;
1763 			active_slabs++;
1764 		}
1765 		list_for_each_entry(slabp, &l3->slabs_free, list)
1766 			num_slabs++;
1767 
1768 		free_objects += l3->free_objects;
1769 		spin_unlock_irqrestore(&l3->list_lock, flags);
1770 
1771 		num_slabs += active_slabs;
1772 		num_objs = num_slabs * cachep->num;
1773 		printk(KERN_WARNING
1774 			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1775 			node, active_slabs, num_slabs, active_objs, num_objs,
1776 			free_objects);
1777 	}
1778 }
1779 
1780 /*
1781  * Interface to system's page allocator. No need to hold the cache-lock.
1782  *
1783  * If we requested dmaable memory, we will get it. Even if we
1784  * did not request dmaable memory, we might get it, but that
1785  * would be relatively rare and ignorable.
1786  */
1787 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1788 {
1789 	struct page *page;
1790 	int nr_pages;
1791 	int i;
1792 
1793 #ifndef CONFIG_MMU
1794 	/*
1795 	 * Nommu uses slab's for process anonymous memory allocations, and thus
1796 	 * requires __GFP_COMP to properly refcount higher order allocations
1797 	 */
1798 	flags |= __GFP_COMP;
1799 #endif
1800 
1801 	flags |= cachep->gfpflags;
1802 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1803 		flags |= __GFP_RECLAIMABLE;
1804 
1805 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1806 	if (!page) {
1807 		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1808 			slab_out_of_memory(cachep, flags, nodeid);
1809 		return NULL;
1810 	}
1811 
1812 	nr_pages = (1 << cachep->gfporder);
1813 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1814 		add_zone_page_state(page_zone(page),
1815 			NR_SLAB_RECLAIMABLE, nr_pages);
1816 	else
1817 		add_zone_page_state(page_zone(page),
1818 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1819 	for (i = 0; i < nr_pages; i++)
1820 		__SetPageSlab(page + i);
1821 
1822 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1823 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1824 
1825 		if (cachep->ctor)
1826 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1827 		else
1828 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1829 	}
1830 
1831 	return page_address(page);
1832 }
1833 
1834 /*
1835  * Interface to system's page release.
1836  */
1837 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1838 {
1839 	unsigned long i = (1 << cachep->gfporder);
1840 	struct page *page = virt_to_page(addr);
1841 	const unsigned long nr_freed = i;
1842 
1843 	kmemcheck_free_shadow(page, cachep->gfporder);
1844 
1845 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1846 		sub_zone_page_state(page_zone(page),
1847 				NR_SLAB_RECLAIMABLE, nr_freed);
1848 	else
1849 		sub_zone_page_state(page_zone(page),
1850 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1851 	while (i--) {
1852 		BUG_ON(!PageSlab(page));
1853 		__ClearPageSlab(page);
1854 		page++;
1855 	}
1856 	if (current->reclaim_state)
1857 		current->reclaim_state->reclaimed_slab += nr_freed;
1858 	free_pages((unsigned long)addr, cachep->gfporder);
1859 }
1860 
1861 static void kmem_rcu_free(struct rcu_head *head)
1862 {
1863 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1864 	struct kmem_cache *cachep = slab_rcu->cachep;
1865 
1866 	kmem_freepages(cachep, slab_rcu->addr);
1867 	if (OFF_SLAB(cachep))
1868 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1869 }
1870 
1871 #if DEBUG
1872 
1873 #ifdef CONFIG_DEBUG_PAGEALLOC
1874 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1875 			    unsigned long caller)
1876 {
1877 	int size = obj_size(cachep);
1878 
1879 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1880 
1881 	if (size < 5 * sizeof(unsigned long))
1882 		return;
1883 
1884 	*addr++ = 0x12345678;
1885 	*addr++ = caller;
1886 	*addr++ = smp_processor_id();
1887 	size -= 3 * sizeof(unsigned long);
1888 	{
1889 		unsigned long *sptr = &caller;
1890 		unsigned long svalue;
1891 
1892 		while (!kstack_end(sptr)) {
1893 			svalue = *sptr++;
1894 			if (kernel_text_address(svalue)) {
1895 				*addr++ = svalue;
1896 				size -= sizeof(unsigned long);
1897 				if (size <= sizeof(unsigned long))
1898 					break;
1899 			}
1900 		}
1901 
1902 	}
1903 	*addr++ = 0x87654321;
1904 }
1905 #endif
1906 
1907 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1908 {
1909 	int size = obj_size(cachep);
1910 	addr = &((char *)addr)[obj_offset(cachep)];
1911 
1912 	memset(addr, val, size);
1913 	*(unsigned char *)(addr + size - 1) = POISON_END;
1914 }
1915 
1916 static void dump_line(char *data, int offset, int limit)
1917 {
1918 	int i;
1919 	unsigned char error = 0;
1920 	int bad_count = 0;
1921 
1922 	printk(KERN_ERR "%03x: ", offset);
1923 	for (i = 0; i < limit; i++) {
1924 		if (data[offset + i] != POISON_FREE) {
1925 			error = data[offset + i];
1926 			bad_count++;
1927 		}
1928 	}
1929 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1930 			&data[offset], limit, 1);
1931 
1932 	if (bad_count == 1) {
1933 		error ^= POISON_FREE;
1934 		if (!(error & (error - 1))) {
1935 			printk(KERN_ERR "Single bit error detected. Probably "
1936 					"bad RAM.\n");
1937 #ifdef CONFIG_X86
1938 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1939 					"test tool.\n");
1940 #else
1941 			printk(KERN_ERR "Run a memory test tool.\n");
1942 #endif
1943 		}
1944 	}
1945 }
1946 #endif
1947 
1948 #if DEBUG
1949 
1950 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1951 {
1952 	int i, size;
1953 	char *realobj;
1954 
1955 	if (cachep->flags & SLAB_RED_ZONE) {
1956 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1957 			*dbg_redzone1(cachep, objp),
1958 			*dbg_redzone2(cachep, objp));
1959 	}
1960 
1961 	if (cachep->flags & SLAB_STORE_USER) {
1962 		printk(KERN_ERR "Last user: [<%p>]",
1963 			*dbg_userword(cachep, objp));
1964 		print_symbol("(%s)",
1965 				(unsigned long)*dbg_userword(cachep, objp));
1966 		printk("\n");
1967 	}
1968 	realobj = (char *)objp + obj_offset(cachep);
1969 	size = obj_size(cachep);
1970 	for (i = 0; i < size && lines; i += 16, lines--) {
1971 		int limit;
1972 		limit = 16;
1973 		if (i + limit > size)
1974 			limit = size - i;
1975 		dump_line(realobj, i, limit);
1976 	}
1977 }
1978 
1979 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1980 {
1981 	char *realobj;
1982 	int size, i;
1983 	int lines = 0;
1984 
1985 	realobj = (char *)objp + obj_offset(cachep);
1986 	size = obj_size(cachep);
1987 
1988 	for (i = 0; i < size; i++) {
1989 		char exp = POISON_FREE;
1990 		if (i == size - 1)
1991 			exp = POISON_END;
1992 		if (realobj[i] != exp) {
1993 			int limit;
1994 			/* Mismatch ! */
1995 			/* Print header */
1996 			if (lines == 0) {
1997 				printk(KERN_ERR
1998 					"Slab corruption (%s): %s start=%p, len=%d\n",
1999 					print_tainted(), cachep->name, realobj, size);
2000 				print_objinfo(cachep, objp, 0);
2001 			}
2002 			/* Hexdump the affected line */
2003 			i = (i / 16) * 16;
2004 			limit = 16;
2005 			if (i + limit > size)
2006 				limit = size - i;
2007 			dump_line(realobj, i, limit);
2008 			i += 16;
2009 			lines++;
2010 			/* Limit to 5 lines */
2011 			if (lines > 5)
2012 				break;
2013 		}
2014 	}
2015 	if (lines != 0) {
2016 		/* Print some data about the neighboring objects, if they
2017 		 * exist:
2018 		 */
2019 		struct slab *slabp = virt_to_slab(objp);
2020 		unsigned int objnr;
2021 
2022 		objnr = obj_to_index(cachep, slabp, objp);
2023 		if (objnr) {
2024 			objp = index_to_obj(cachep, slabp, objnr - 1);
2025 			realobj = (char *)objp + obj_offset(cachep);
2026 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
2027 			       realobj, size);
2028 			print_objinfo(cachep, objp, 2);
2029 		}
2030 		if (objnr + 1 < cachep->num) {
2031 			objp = index_to_obj(cachep, slabp, objnr + 1);
2032 			realobj = (char *)objp + obj_offset(cachep);
2033 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
2034 			       realobj, size);
2035 			print_objinfo(cachep, objp, 2);
2036 		}
2037 	}
2038 }
2039 #endif
2040 
2041 #if DEBUG
2042 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2043 {
2044 	int i;
2045 	for (i = 0; i < cachep->num; i++) {
2046 		void *objp = index_to_obj(cachep, slabp, i);
2047 
2048 		if (cachep->flags & SLAB_POISON) {
2049 #ifdef CONFIG_DEBUG_PAGEALLOC
2050 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
2051 					OFF_SLAB(cachep))
2052 				kernel_map_pages(virt_to_page(objp),
2053 					cachep->buffer_size / PAGE_SIZE, 1);
2054 			else
2055 				check_poison_obj(cachep, objp);
2056 #else
2057 			check_poison_obj(cachep, objp);
2058 #endif
2059 		}
2060 		if (cachep->flags & SLAB_RED_ZONE) {
2061 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2062 				slab_error(cachep, "start of a freed object "
2063 					   "was overwritten");
2064 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2065 				slab_error(cachep, "end of a freed object "
2066 					   "was overwritten");
2067 		}
2068 	}
2069 }
2070 #else
2071 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2072 {
2073 }
2074 #endif
2075 
2076 /**
2077  * slab_destroy - destroy and release all objects in a slab
2078  * @cachep: cache pointer being destroyed
2079  * @slabp: slab pointer being destroyed
2080  *
2081  * Destroy all the objs in a slab, and release the mem back to the system.
2082  * Before calling the slab must have been unlinked from the cache.  The
2083  * cache-lock is not held/needed.
2084  */
2085 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2086 {
2087 	void *addr = slabp->s_mem - slabp->colouroff;
2088 
2089 	slab_destroy_debugcheck(cachep, slabp);
2090 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2091 		struct slab_rcu *slab_rcu;
2092 
2093 		slab_rcu = (struct slab_rcu *)slabp;
2094 		slab_rcu->cachep = cachep;
2095 		slab_rcu->addr = addr;
2096 		call_rcu(&slab_rcu->head, kmem_rcu_free);
2097 	} else {
2098 		kmem_freepages(cachep, addr);
2099 		if (OFF_SLAB(cachep))
2100 			kmem_cache_free(cachep->slabp_cache, slabp);
2101 	}
2102 }
2103 
2104 static void __kmem_cache_destroy(struct kmem_cache *cachep)
2105 {
2106 	int i;
2107 	struct kmem_list3 *l3;
2108 
2109 	for_each_online_cpu(i)
2110 	    kfree(cachep->array[i]);
2111 
2112 	/* NUMA: free the list3 structures */
2113 	for_each_online_node(i) {
2114 		l3 = cachep->nodelists[i];
2115 		if (l3) {
2116 			kfree(l3->shared);
2117 			free_alien_cache(l3->alien);
2118 			kfree(l3);
2119 		}
2120 	}
2121 	kmem_cache_free(&cache_cache, cachep);
2122 }
2123 
2124 
2125 /**
2126  * calculate_slab_order - calculate size (page order) of slabs
2127  * @cachep: pointer to the cache that is being created
2128  * @size: size of objects to be created in this cache.
2129  * @align: required alignment for the objects.
2130  * @flags: slab allocation flags
2131  *
2132  * Also calculates the number of objects per slab.
2133  *
2134  * This could be made much more intelligent.  For now, try to avoid using
2135  * high order pages for slabs.  When the gfp() functions are more friendly
2136  * towards high-order requests, this should be changed.
2137  */
2138 static size_t calculate_slab_order(struct kmem_cache *cachep,
2139 			size_t size, size_t align, unsigned long flags)
2140 {
2141 	unsigned long offslab_limit;
2142 	size_t left_over = 0;
2143 	int gfporder;
2144 
2145 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2146 		unsigned int num;
2147 		size_t remainder;
2148 
2149 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2150 		if (!num)
2151 			continue;
2152 
2153 		if (flags & CFLGS_OFF_SLAB) {
2154 			/*
2155 			 * Max number of objs-per-slab for caches which
2156 			 * use off-slab slabs. Needed to avoid a possible
2157 			 * looping condition in cache_grow().
2158 			 */
2159 			offslab_limit = size - sizeof(struct slab);
2160 			offslab_limit /= sizeof(kmem_bufctl_t);
2161 
2162  			if (num > offslab_limit)
2163 				break;
2164 		}
2165 
2166 		/* Found something acceptable - save it away */
2167 		cachep->num = num;
2168 		cachep->gfporder = gfporder;
2169 		left_over = remainder;
2170 
2171 		/*
2172 		 * A VFS-reclaimable slab tends to have most allocations
2173 		 * as GFP_NOFS and we really don't want to have to be allocating
2174 		 * higher-order pages when we are unable to shrink dcache.
2175 		 */
2176 		if (flags & SLAB_RECLAIM_ACCOUNT)
2177 			break;
2178 
2179 		/*
2180 		 * Large number of objects is good, but very large slabs are
2181 		 * currently bad for the gfp()s.
2182 		 */
2183 		if (gfporder >= slab_max_order)
2184 			break;
2185 
2186 		/*
2187 		 * Acceptable internal fragmentation?
2188 		 */
2189 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2190 			break;
2191 	}
2192 	return left_over;
2193 }
2194 
2195 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2196 {
2197 	if (g_cpucache_up == FULL)
2198 		return enable_cpucache(cachep, gfp);
2199 
2200 	if (g_cpucache_up == NONE) {
2201 		/*
2202 		 * Note: the first kmem_cache_create must create the cache
2203 		 * that's used by kmalloc(24), otherwise the creation of
2204 		 * further caches will BUG().
2205 		 */
2206 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2207 
2208 		/*
2209 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2210 		 * the first cache, then we need to set up all its list3s,
2211 		 * otherwise the creation of further caches will BUG().
2212 		 */
2213 		set_up_list3s(cachep, SIZE_AC);
2214 		if (INDEX_AC == INDEX_L3)
2215 			g_cpucache_up = PARTIAL_L3;
2216 		else
2217 			g_cpucache_up = PARTIAL_AC;
2218 	} else {
2219 		cachep->array[smp_processor_id()] =
2220 			kmalloc(sizeof(struct arraycache_init), gfp);
2221 
2222 		if (g_cpucache_up == PARTIAL_AC) {
2223 			set_up_list3s(cachep, SIZE_L3);
2224 			g_cpucache_up = PARTIAL_L3;
2225 		} else {
2226 			int node;
2227 			for_each_online_node(node) {
2228 				cachep->nodelists[node] =
2229 				    kmalloc_node(sizeof(struct kmem_list3),
2230 						gfp, node);
2231 				BUG_ON(!cachep->nodelists[node]);
2232 				kmem_list3_init(cachep->nodelists[node]);
2233 			}
2234 		}
2235 	}
2236 	cachep->nodelists[numa_mem_id()]->next_reap =
2237 			jiffies + REAPTIMEOUT_LIST3 +
2238 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2239 
2240 	cpu_cache_get(cachep)->avail = 0;
2241 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2242 	cpu_cache_get(cachep)->batchcount = 1;
2243 	cpu_cache_get(cachep)->touched = 0;
2244 	cachep->batchcount = 1;
2245 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2246 	return 0;
2247 }
2248 
2249 /**
2250  * kmem_cache_create - Create a cache.
2251  * @name: A string which is used in /proc/slabinfo to identify this cache.
2252  * @size: The size of objects to be created in this cache.
2253  * @align: The required alignment for the objects.
2254  * @flags: SLAB flags
2255  * @ctor: A constructor for the objects.
2256  *
2257  * Returns a ptr to the cache on success, NULL on failure.
2258  * Cannot be called within a int, but can be interrupted.
2259  * The @ctor is run when new pages are allocated by the cache.
2260  *
2261  * @name must be valid until the cache is destroyed. This implies that
2262  * the module calling this has to destroy the cache before getting unloaded.
2263  *
2264  * The flags are
2265  *
2266  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2267  * to catch references to uninitialised memory.
2268  *
2269  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2270  * for buffer overruns.
2271  *
2272  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2273  * cacheline.  This can be beneficial if you're counting cycles as closely
2274  * as davem.
2275  */
2276 struct kmem_cache *
2277 kmem_cache_create (const char *name, size_t size, size_t align,
2278 	unsigned long flags, void (*ctor)(void *))
2279 {
2280 	size_t left_over, slab_size, ralign;
2281 	struct kmem_cache *cachep = NULL, *pc;
2282 	gfp_t gfp;
2283 
2284 	/*
2285 	 * Sanity checks... these are all serious usage bugs.
2286 	 */
2287 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2288 	    size > KMALLOC_MAX_SIZE) {
2289 		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2290 				name);
2291 		BUG();
2292 	}
2293 
2294 	/*
2295 	 * We use cache_chain_mutex to ensure a consistent view of
2296 	 * cpu_online_mask as well.  Please see cpuup_callback
2297 	 */
2298 	if (slab_is_available()) {
2299 		get_online_cpus();
2300 		mutex_lock(&cache_chain_mutex);
2301 	}
2302 
2303 	list_for_each_entry(pc, &cache_chain, next) {
2304 		char tmp;
2305 		int res;
2306 
2307 		/*
2308 		 * This happens when the module gets unloaded and doesn't
2309 		 * destroy its slab cache and no-one else reuses the vmalloc
2310 		 * area of the module.  Print a warning.
2311 		 */
2312 		res = probe_kernel_address(pc->name, tmp);
2313 		if (res) {
2314 			printk(KERN_ERR
2315 			       "SLAB: cache with size %d has lost its name\n",
2316 			       pc->buffer_size);
2317 			continue;
2318 		}
2319 
2320 		if (!strcmp(pc->name, name)) {
2321 			printk(KERN_ERR
2322 			       "kmem_cache_create: duplicate cache %s\n", name);
2323 			dump_stack();
2324 			goto oops;
2325 		}
2326 	}
2327 
2328 #if DEBUG
2329 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2330 #if FORCED_DEBUG
2331 	/*
2332 	 * Enable redzoning and last user accounting, except for caches with
2333 	 * large objects, if the increased size would increase the object size
2334 	 * above the next power of two: caches with object sizes just above a
2335 	 * power of two have a significant amount of internal fragmentation.
2336 	 */
2337 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2338 						2 * sizeof(unsigned long long)))
2339 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2340 	if (!(flags & SLAB_DESTROY_BY_RCU))
2341 		flags |= SLAB_POISON;
2342 #endif
2343 	if (flags & SLAB_DESTROY_BY_RCU)
2344 		BUG_ON(flags & SLAB_POISON);
2345 #endif
2346 	/*
2347 	 * Always checks flags, a caller might be expecting debug support which
2348 	 * isn't available.
2349 	 */
2350 	BUG_ON(flags & ~CREATE_MASK);
2351 
2352 	/*
2353 	 * Check that size is in terms of words.  This is needed to avoid
2354 	 * unaligned accesses for some archs when redzoning is used, and makes
2355 	 * sure any on-slab bufctl's are also correctly aligned.
2356 	 */
2357 	if (size & (BYTES_PER_WORD - 1)) {
2358 		size += (BYTES_PER_WORD - 1);
2359 		size &= ~(BYTES_PER_WORD - 1);
2360 	}
2361 
2362 	/* calculate the final buffer alignment: */
2363 
2364 	/* 1) arch recommendation: can be overridden for debug */
2365 	if (flags & SLAB_HWCACHE_ALIGN) {
2366 		/*
2367 		 * Default alignment: as specified by the arch code.  Except if
2368 		 * an object is really small, then squeeze multiple objects into
2369 		 * one cacheline.
2370 		 */
2371 		ralign = cache_line_size();
2372 		while (size <= ralign / 2)
2373 			ralign /= 2;
2374 	} else {
2375 		ralign = BYTES_PER_WORD;
2376 	}
2377 
2378 	/*
2379 	 * Redzoning and user store require word alignment or possibly larger.
2380 	 * Note this will be overridden by architecture or caller mandated
2381 	 * alignment if either is greater than BYTES_PER_WORD.
2382 	 */
2383 	if (flags & SLAB_STORE_USER)
2384 		ralign = BYTES_PER_WORD;
2385 
2386 	if (flags & SLAB_RED_ZONE) {
2387 		ralign = REDZONE_ALIGN;
2388 		/* If redzoning, ensure that the second redzone is suitably
2389 		 * aligned, by adjusting the object size accordingly. */
2390 		size += REDZONE_ALIGN - 1;
2391 		size &= ~(REDZONE_ALIGN - 1);
2392 	}
2393 
2394 	/* 2) arch mandated alignment */
2395 	if (ralign < ARCH_SLAB_MINALIGN) {
2396 		ralign = ARCH_SLAB_MINALIGN;
2397 	}
2398 	/* 3) caller mandated alignment */
2399 	if (ralign < align) {
2400 		ralign = align;
2401 	}
2402 	/* disable debug if necessary */
2403 	if (ralign > __alignof__(unsigned long long))
2404 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2405 	/*
2406 	 * 4) Store it.
2407 	 */
2408 	align = ralign;
2409 
2410 	if (slab_is_available())
2411 		gfp = GFP_KERNEL;
2412 	else
2413 		gfp = GFP_NOWAIT;
2414 
2415 	/* Get cache's description obj. */
2416 	cachep = kmem_cache_zalloc(&cache_cache, gfp);
2417 	if (!cachep)
2418 		goto oops;
2419 
2420 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
2421 #if DEBUG
2422 	cachep->obj_size = size;
2423 
2424 	/*
2425 	 * Both debugging options require word-alignment which is calculated
2426 	 * into align above.
2427 	 */
2428 	if (flags & SLAB_RED_ZONE) {
2429 		/* add space for red zone words */
2430 		cachep->obj_offset += sizeof(unsigned long long);
2431 		size += 2 * sizeof(unsigned long long);
2432 	}
2433 	if (flags & SLAB_STORE_USER) {
2434 		/* user store requires one word storage behind the end of
2435 		 * the real object. But if the second red zone needs to be
2436 		 * aligned to 64 bits, we must allow that much space.
2437 		 */
2438 		if (flags & SLAB_RED_ZONE)
2439 			size += REDZONE_ALIGN;
2440 		else
2441 			size += BYTES_PER_WORD;
2442 	}
2443 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2444 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2445 	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
2446 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
2447 		size = PAGE_SIZE;
2448 	}
2449 #endif
2450 #endif
2451 
2452 	/*
2453 	 * Determine if the slab management is 'on' or 'off' slab.
2454 	 * (bootstrapping cannot cope with offslab caches so don't do
2455 	 * it too early on. Always use on-slab management when
2456 	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2457 	 */
2458 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2459 	    !(flags & SLAB_NOLEAKTRACE))
2460 		/*
2461 		 * Size is large, assume best to place the slab management obj
2462 		 * off-slab (should allow better packing of objs).
2463 		 */
2464 		flags |= CFLGS_OFF_SLAB;
2465 
2466 	size = ALIGN(size, align);
2467 
2468 	left_over = calculate_slab_order(cachep, size, align, flags);
2469 
2470 	if (!cachep->num) {
2471 		printk(KERN_ERR
2472 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2473 		kmem_cache_free(&cache_cache, cachep);
2474 		cachep = NULL;
2475 		goto oops;
2476 	}
2477 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2478 			  + sizeof(struct slab), align);
2479 
2480 	/*
2481 	 * If the slab has been placed off-slab, and we have enough space then
2482 	 * move it on-slab. This is at the expense of any extra colouring.
2483 	 */
2484 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2485 		flags &= ~CFLGS_OFF_SLAB;
2486 		left_over -= slab_size;
2487 	}
2488 
2489 	if (flags & CFLGS_OFF_SLAB) {
2490 		/* really off slab. No need for manual alignment */
2491 		slab_size =
2492 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2493 
2494 #ifdef CONFIG_PAGE_POISONING
2495 		/* If we're going to use the generic kernel_map_pages()
2496 		 * poisoning, then it's going to smash the contents of
2497 		 * the redzone and userword anyhow, so switch them off.
2498 		 */
2499 		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2500 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2501 #endif
2502 	}
2503 
2504 	cachep->colour_off = cache_line_size();
2505 	/* Offset must be a multiple of the alignment. */
2506 	if (cachep->colour_off < align)
2507 		cachep->colour_off = align;
2508 	cachep->colour = left_over / cachep->colour_off;
2509 	cachep->slab_size = slab_size;
2510 	cachep->flags = flags;
2511 	cachep->gfpflags = 0;
2512 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2513 		cachep->gfpflags |= GFP_DMA;
2514 	cachep->buffer_size = size;
2515 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2516 
2517 	if (flags & CFLGS_OFF_SLAB) {
2518 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2519 		/*
2520 		 * This is a possibility for one of the malloc_sizes caches.
2521 		 * But since we go off slab only for object size greater than
2522 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2523 		 * this should not happen at all.
2524 		 * But leave a BUG_ON for some lucky dude.
2525 		 */
2526 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2527 	}
2528 	cachep->ctor = ctor;
2529 	cachep->name = name;
2530 
2531 	if (setup_cpu_cache(cachep, gfp)) {
2532 		__kmem_cache_destroy(cachep);
2533 		cachep = NULL;
2534 		goto oops;
2535 	}
2536 
2537 	if (flags & SLAB_DEBUG_OBJECTS) {
2538 		/*
2539 		 * Would deadlock through slab_destroy()->call_rcu()->
2540 		 * debug_object_activate()->kmem_cache_alloc().
2541 		 */
2542 		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2543 
2544 		slab_set_debugobj_lock_classes(cachep);
2545 	}
2546 
2547 	/* cache setup completed, link it into the list */
2548 	list_add(&cachep->next, &cache_chain);
2549 oops:
2550 	if (!cachep && (flags & SLAB_PANIC))
2551 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2552 		      name);
2553 	if (slab_is_available()) {
2554 		mutex_unlock(&cache_chain_mutex);
2555 		put_online_cpus();
2556 	}
2557 	return cachep;
2558 }
2559 EXPORT_SYMBOL(kmem_cache_create);
2560 
2561 #if DEBUG
2562 static void check_irq_off(void)
2563 {
2564 	BUG_ON(!irqs_disabled());
2565 }
2566 
2567 static void check_irq_on(void)
2568 {
2569 	BUG_ON(irqs_disabled());
2570 }
2571 
2572 static void check_spinlock_acquired(struct kmem_cache *cachep)
2573 {
2574 #ifdef CONFIG_SMP
2575 	check_irq_off();
2576 	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2577 #endif
2578 }
2579 
2580 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2581 {
2582 #ifdef CONFIG_SMP
2583 	check_irq_off();
2584 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2585 #endif
2586 }
2587 
2588 #else
2589 #define check_irq_off()	do { } while(0)
2590 #define check_irq_on()	do { } while(0)
2591 #define check_spinlock_acquired(x) do { } while(0)
2592 #define check_spinlock_acquired_node(x, y) do { } while(0)
2593 #endif
2594 
2595 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2596 			struct array_cache *ac,
2597 			int force, int node);
2598 
2599 static void do_drain(void *arg)
2600 {
2601 	struct kmem_cache *cachep = arg;
2602 	struct array_cache *ac;
2603 	int node = numa_mem_id();
2604 
2605 	check_irq_off();
2606 	ac = cpu_cache_get(cachep);
2607 	spin_lock(&cachep->nodelists[node]->list_lock);
2608 	free_block(cachep, ac->entry, ac->avail, node);
2609 	spin_unlock(&cachep->nodelists[node]->list_lock);
2610 	ac->avail = 0;
2611 }
2612 
2613 static void drain_cpu_caches(struct kmem_cache *cachep)
2614 {
2615 	struct kmem_list3 *l3;
2616 	int node;
2617 
2618 	on_each_cpu(do_drain, cachep, 1);
2619 	check_irq_on();
2620 	for_each_online_node(node) {
2621 		l3 = cachep->nodelists[node];
2622 		if (l3 && l3->alien)
2623 			drain_alien_cache(cachep, l3->alien);
2624 	}
2625 
2626 	for_each_online_node(node) {
2627 		l3 = cachep->nodelists[node];
2628 		if (l3)
2629 			drain_array(cachep, l3, l3->shared, 1, node);
2630 	}
2631 }
2632 
2633 /*
2634  * Remove slabs from the list of free slabs.
2635  * Specify the number of slabs to drain in tofree.
2636  *
2637  * Returns the actual number of slabs released.
2638  */
2639 static int drain_freelist(struct kmem_cache *cache,
2640 			struct kmem_list3 *l3, int tofree)
2641 {
2642 	struct list_head *p;
2643 	int nr_freed;
2644 	struct slab *slabp;
2645 
2646 	nr_freed = 0;
2647 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2648 
2649 		spin_lock_irq(&l3->list_lock);
2650 		p = l3->slabs_free.prev;
2651 		if (p == &l3->slabs_free) {
2652 			spin_unlock_irq(&l3->list_lock);
2653 			goto out;
2654 		}
2655 
2656 		slabp = list_entry(p, struct slab, list);
2657 #if DEBUG
2658 		BUG_ON(slabp->inuse);
2659 #endif
2660 		list_del(&slabp->list);
2661 		/*
2662 		 * Safe to drop the lock. The slab is no longer linked
2663 		 * to the cache.
2664 		 */
2665 		l3->free_objects -= cache->num;
2666 		spin_unlock_irq(&l3->list_lock);
2667 		slab_destroy(cache, slabp);
2668 		nr_freed++;
2669 	}
2670 out:
2671 	return nr_freed;
2672 }
2673 
2674 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2675 static int __cache_shrink(struct kmem_cache *cachep)
2676 {
2677 	int ret = 0, i = 0;
2678 	struct kmem_list3 *l3;
2679 
2680 	drain_cpu_caches(cachep);
2681 
2682 	check_irq_on();
2683 	for_each_online_node(i) {
2684 		l3 = cachep->nodelists[i];
2685 		if (!l3)
2686 			continue;
2687 
2688 		drain_freelist(cachep, l3, l3->free_objects);
2689 
2690 		ret += !list_empty(&l3->slabs_full) ||
2691 			!list_empty(&l3->slabs_partial);
2692 	}
2693 	return (ret ? 1 : 0);
2694 }
2695 
2696 /**
2697  * kmem_cache_shrink - Shrink a cache.
2698  * @cachep: The cache to shrink.
2699  *
2700  * Releases as many slabs as possible for a cache.
2701  * To help debugging, a zero exit status indicates all slabs were released.
2702  */
2703 int kmem_cache_shrink(struct kmem_cache *cachep)
2704 {
2705 	int ret;
2706 	BUG_ON(!cachep || in_interrupt());
2707 
2708 	get_online_cpus();
2709 	mutex_lock(&cache_chain_mutex);
2710 	ret = __cache_shrink(cachep);
2711 	mutex_unlock(&cache_chain_mutex);
2712 	put_online_cpus();
2713 	return ret;
2714 }
2715 EXPORT_SYMBOL(kmem_cache_shrink);
2716 
2717 /**
2718  * kmem_cache_destroy - delete a cache
2719  * @cachep: the cache to destroy
2720  *
2721  * Remove a &struct kmem_cache object from the slab cache.
2722  *
2723  * It is expected this function will be called by a module when it is
2724  * unloaded.  This will remove the cache completely, and avoid a duplicate
2725  * cache being allocated each time a module is loaded and unloaded, if the
2726  * module doesn't have persistent in-kernel storage across loads and unloads.
2727  *
2728  * The cache must be empty before calling this function.
2729  *
2730  * The caller must guarantee that no one will allocate memory from the cache
2731  * during the kmem_cache_destroy().
2732  */
2733 void kmem_cache_destroy(struct kmem_cache *cachep)
2734 {
2735 	BUG_ON(!cachep || in_interrupt());
2736 
2737 	/* Find the cache in the chain of caches. */
2738 	get_online_cpus();
2739 	mutex_lock(&cache_chain_mutex);
2740 	/*
2741 	 * the chain is never empty, cache_cache is never destroyed
2742 	 */
2743 	list_del(&cachep->next);
2744 	if (__cache_shrink(cachep)) {
2745 		slab_error(cachep, "Can't free all objects");
2746 		list_add(&cachep->next, &cache_chain);
2747 		mutex_unlock(&cache_chain_mutex);
2748 		put_online_cpus();
2749 		return;
2750 	}
2751 
2752 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2753 		rcu_barrier();
2754 
2755 	__kmem_cache_destroy(cachep);
2756 	mutex_unlock(&cache_chain_mutex);
2757 	put_online_cpus();
2758 }
2759 EXPORT_SYMBOL(kmem_cache_destroy);
2760 
2761 /*
2762  * Get the memory for a slab management obj.
2763  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2764  * always come from malloc_sizes caches.  The slab descriptor cannot
2765  * come from the same cache which is getting created because,
2766  * when we are searching for an appropriate cache for these
2767  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2768  * If we are creating a malloc_sizes cache here it would not be visible to
2769  * kmem_find_general_cachep till the initialization is complete.
2770  * Hence we cannot have slabp_cache same as the original cache.
2771  */
2772 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2773 				   int colour_off, gfp_t local_flags,
2774 				   int nodeid)
2775 {
2776 	struct slab *slabp;
2777 
2778 	if (OFF_SLAB(cachep)) {
2779 		/* Slab management obj is off-slab. */
2780 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2781 					      local_flags, nodeid);
2782 		/*
2783 		 * If the first object in the slab is leaked (it's allocated
2784 		 * but no one has a reference to it), we want to make sure
2785 		 * kmemleak does not treat the ->s_mem pointer as a reference
2786 		 * to the object. Otherwise we will not report the leak.
2787 		 */
2788 		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2789 				   local_flags);
2790 		if (!slabp)
2791 			return NULL;
2792 	} else {
2793 		slabp = objp + colour_off;
2794 		colour_off += cachep->slab_size;
2795 	}
2796 	slabp->inuse = 0;
2797 	slabp->colouroff = colour_off;
2798 	slabp->s_mem = objp + colour_off;
2799 	slabp->nodeid = nodeid;
2800 	slabp->free = 0;
2801 	return slabp;
2802 }
2803 
2804 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2805 {
2806 	return (kmem_bufctl_t *) (slabp + 1);
2807 }
2808 
2809 static void cache_init_objs(struct kmem_cache *cachep,
2810 			    struct slab *slabp)
2811 {
2812 	int i;
2813 
2814 	for (i = 0; i < cachep->num; i++) {
2815 		void *objp = index_to_obj(cachep, slabp, i);
2816 #if DEBUG
2817 		/* need to poison the objs? */
2818 		if (cachep->flags & SLAB_POISON)
2819 			poison_obj(cachep, objp, POISON_FREE);
2820 		if (cachep->flags & SLAB_STORE_USER)
2821 			*dbg_userword(cachep, objp) = NULL;
2822 
2823 		if (cachep->flags & SLAB_RED_ZONE) {
2824 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2825 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2826 		}
2827 		/*
2828 		 * Constructors are not allowed to allocate memory from the same
2829 		 * cache which they are a constructor for.  Otherwise, deadlock.
2830 		 * They must also be threaded.
2831 		 */
2832 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2833 			cachep->ctor(objp + obj_offset(cachep));
2834 
2835 		if (cachep->flags & SLAB_RED_ZONE) {
2836 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2837 				slab_error(cachep, "constructor overwrote the"
2838 					   " end of an object");
2839 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2840 				slab_error(cachep, "constructor overwrote the"
2841 					   " start of an object");
2842 		}
2843 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2844 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2845 			kernel_map_pages(virt_to_page(objp),
2846 					 cachep->buffer_size / PAGE_SIZE, 0);
2847 #else
2848 		if (cachep->ctor)
2849 			cachep->ctor(objp);
2850 #endif
2851 		slab_bufctl(slabp)[i] = i + 1;
2852 	}
2853 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2854 }
2855 
2856 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2857 {
2858 	if (CONFIG_ZONE_DMA_FLAG) {
2859 		if (flags & GFP_DMA)
2860 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2861 		else
2862 			BUG_ON(cachep->gfpflags & GFP_DMA);
2863 	}
2864 }
2865 
2866 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2867 				int nodeid)
2868 {
2869 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2870 	kmem_bufctl_t next;
2871 
2872 	slabp->inuse++;
2873 	next = slab_bufctl(slabp)[slabp->free];
2874 #if DEBUG
2875 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2876 	WARN_ON(slabp->nodeid != nodeid);
2877 #endif
2878 	slabp->free = next;
2879 
2880 	return objp;
2881 }
2882 
2883 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2884 				void *objp, int nodeid)
2885 {
2886 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2887 
2888 #if DEBUG
2889 	/* Verify that the slab belongs to the intended node */
2890 	WARN_ON(slabp->nodeid != nodeid);
2891 
2892 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2893 		printk(KERN_ERR "slab: double free detected in cache "
2894 				"'%s', objp %p\n", cachep->name, objp);
2895 		BUG();
2896 	}
2897 #endif
2898 	slab_bufctl(slabp)[objnr] = slabp->free;
2899 	slabp->free = objnr;
2900 	slabp->inuse--;
2901 }
2902 
2903 /*
2904  * Map pages beginning at addr to the given cache and slab. This is required
2905  * for the slab allocator to be able to lookup the cache and slab of a
2906  * virtual address for kfree, ksize, and slab debugging.
2907  */
2908 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2909 			   void *addr)
2910 {
2911 	int nr_pages;
2912 	struct page *page;
2913 
2914 	page = virt_to_page(addr);
2915 
2916 	nr_pages = 1;
2917 	if (likely(!PageCompound(page)))
2918 		nr_pages <<= cache->gfporder;
2919 
2920 	do {
2921 		page_set_cache(page, cache);
2922 		page_set_slab(page, slab);
2923 		page++;
2924 	} while (--nr_pages);
2925 }
2926 
2927 /*
2928  * Grow (by 1) the number of slabs within a cache.  This is called by
2929  * kmem_cache_alloc() when there are no active objs left in a cache.
2930  */
2931 static int cache_grow(struct kmem_cache *cachep,
2932 		gfp_t flags, int nodeid, void *objp)
2933 {
2934 	struct slab *slabp;
2935 	size_t offset;
2936 	gfp_t local_flags;
2937 	struct kmem_list3 *l3;
2938 
2939 	/*
2940 	 * Be lazy and only check for valid flags here,  keeping it out of the
2941 	 * critical path in kmem_cache_alloc().
2942 	 */
2943 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2944 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2945 
2946 	/* Take the l3 list lock to change the colour_next on this node */
2947 	check_irq_off();
2948 	l3 = cachep->nodelists[nodeid];
2949 	spin_lock(&l3->list_lock);
2950 
2951 	/* Get colour for the slab, and cal the next value. */
2952 	offset = l3->colour_next;
2953 	l3->colour_next++;
2954 	if (l3->colour_next >= cachep->colour)
2955 		l3->colour_next = 0;
2956 	spin_unlock(&l3->list_lock);
2957 
2958 	offset *= cachep->colour_off;
2959 
2960 	if (local_flags & __GFP_WAIT)
2961 		local_irq_enable();
2962 
2963 	/*
2964 	 * The test for missing atomic flag is performed here, rather than
2965 	 * the more obvious place, simply to reduce the critical path length
2966 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2967 	 * will eventually be caught here (where it matters).
2968 	 */
2969 	kmem_flagcheck(cachep, flags);
2970 
2971 	/*
2972 	 * Get mem for the objs.  Attempt to allocate a physical page from
2973 	 * 'nodeid'.
2974 	 */
2975 	if (!objp)
2976 		objp = kmem_getpages(cachep, local_flags, nodeid);
2977 	if (!objp)
2978 		goto failed;
2979 
2980 	/* Get slab management. */
2981 	slabp = alloc_slabmgmt(cachep, objp, offset,
2982 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2983 	if (!slabp)
2984 		goto opps1;
2985 
2986 	slab_map_pages(cachep, slabp, objp);
2987 
2988 	cache_init_objs(cachep, slabp);
2989 
2990 	if (local_flags & __GFP_WAIT)
2991 		local_irq_disable();
2992 	check_irq_off();
2993 	spin_lock(&l3->list_lock);
2994 
2995 	/* Make slab active. */
2996 	list_add_tail(&slabp->list, &(l3->slabs_free));
2997 	STATS_INC_GROWN(cachep);
2998 	l3->free_objects += cachep->num;
2999 	spin_unlock(&l3->list_lock);
3000 	return 1;
3001 opps1:
3002 	kmem_freepages(cachep, objp);
3003 failed:
3004 	if (local_flags & __GFP_WAIT)
3005 		local_irq_disable();
3006 	return 0;
3007 }
3008 
3009 #if DEBUG
3010 
3011 /*
3012  * Perform extra freeing checks:
3013  * - detect bad pointers.
3014  * - POISON/RED_ZONE checking
3015  */
3016 static void kfree_debugcheck(const void *objp)
3017 {
3018 	if (!virt_addr_valid(objp)) {
3019 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
3020 		       (unsigned long)objp);
3021 		BUG();
3022 	}
3023 }
3024 
3025 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
3026 {
3027 	unsigned long long redzone1, redzone2;
3028 
3029 	redzone1 = *dbg_redzone1(cache, obj);
3030 	redzone2 = *dbg_redzone2(cache, obj);
3031 
3032 	/*
3033 	 * Redzone is ok.
3034 	 */
3035 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
3036 		return;
3037 
3038 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
3039 		slab_error(cache, "double free detected");
3040 	else
3041 		slab_error(cache, "memory outside object was overwritten");
3042 
3043 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
3044 			obj, redzone1, redzone2);
3045 }
3046 
3047 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
3048 				   void *caller)
3049 {
3050 	struct page *page;
3051 	unsigned int objnr;
3052 	struct slab *slabp;
3053 
3054 	BUG_ON(virt_to_cache(objp) != cachep);
3055 
3056 	objp -= obj_offset(cachep);
3057 	kfree_debugcheck(objp);
3058 	page = virt_to_head_page(objp);
3059 
3060 	slabp = page_get_slab(page);
3061 
3062 	if (cachep->flags & SLAB_RED_ZONE) {
3063 		verify_redzone_free(cachep, objp);
3064 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
3065 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
3066 	}
3067 	if (cachep->flags & SLAB_STORE_USER)
3068 		*dbg_userword(cachep, objp) = caller;
3069 
3070 	objnr = obj_to_index(cachep, slabp, objp);
3071 
3072 	BUG_ON(objnr >= cachep->num);
3073 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
3074 
3075 #ifdef CONFIG_DEBUG_SLAB_LEAK
3076 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
3077 #endif
3078 	if (cachep->flags & SLAB_POISON) {
3079 #ifdef CONFIG_DEBUG_PAGEALLOC
3080 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
3081 			store_stackinfo(cachep, objp, (unsigned long)caller);
3082 			kernel_map_pages(virt_to_page(objp),
3083 					 cachep->buffer_size / PAGE_SIZE, 0);
3084 		} else {
3085 			poison_obj(cachep, objp, POISON_FREE);
3086 		}
3087 #else
3088 		poison_obj(cachep, objp, POISON_FREE);
3089 #endif
3090 	}
3091 	return objp;
3092 }
3093 
3094 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
3095 {
3096 	kmem_bufctl_t i;
3097 	int entries = 0;
3098 
3099 	/* Check slab's freelist to see if this obj is there. */
3100 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
3101 		entries++;
3102 		if (entries > cachep->num || i >= cachep->num)
3103 			goto bad;
3104 	}
3105 	if (entries != cachep->num - slabp->inuse) {
3106 bad:
3107 		printk(KERN_ERR "slab: Internal list corruption detected in "
3108 			"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
3109 			cachep->name, cachep->num, slabp, slabp->inuse,
3110 			print_tainted());
3111 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
3112 			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
3113 			1);
3114 		BUG();
3115 	}
3116 }
3117 #else
3118 #define kfree_debugcheck(x) do { } while(0)
3119 #define cache_free_debugcheck(x,objp,z) (objp)
3120 #define check_slabp(x,y) do { } while(0)
3121 #endif
3122 
3123 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3124 {
3125 	int batchcount;
3126 	struct kmem_list3 *l3;
3127 	struct array_cache *ac;
3128 	int node;
3129 
3130 retry:
3131 	check_irq_off();
3132 	node = numa_mem_id();
3133 	ac = cpu_cache_get(cachep);
3134 	batchcount = ac->batchcount;
3135 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3136 		/*
3137 		 * If there was little recent activity on this cache, then
3138 		 * perform only a partial refill.  Otherwise we could generate
3139 		 * refill bouncing.
3140 		 */
3141 		batchcount = BATCHREFILL_LIMIT;
3142 	}
3143 	l3 = cachep->nodelists[node];
3144 
3145 	BUG_ON(ac->avail > 0 || !l3);
3146 	spin_lock(&l3->list_lock);
3147 
3148 	/* See if we can refill from the shared array */
3149 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
3150 		l3->shared->touched = 1;
3151 		goto alloc_done;
3152 	}
3153 
3154 	while (batchcount > 0) {
3155 		struct list_head *entry;
3156 		struct slab *slabp;
3157 		/* Get slab alloc is to come from. */
3158 		entry = l3->slabs_partial.next;
3159 		if (entry == &l3->slabs_partial) {
3160 			l3->free_touched = 1;
3161 			entry = l3->slabs_free.next;
3162 			if (entry == &l3->slabs_free)
3163 				goto must_grow;
3164 		}
3165 
3166 		slabp = list_entry(entry, struct slab, list);
3167 		check_slabp(cachep, slabp);
3168 		check_spinlock_acquired(cachep);
3169 
3170 		/*
3171 		 * The slab was either on partial or free list so
3172 		 * there must be at least one object available for
3173 		 * allocation.
3174 		 */
3175 		BUG_ON(slabp->inuse >= cachep->num);
3176 
3177 		while (slabp->inuse < cachep->num && batchcount--) {
3178 			STATS_INC_ALLOCED(cachep);
3179 			STATS_INC_ACTIVE(cachep);
3180 			STATS_SET_HIGH(cachep);
3181 
3182 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3183 							    node);
3184 		}
3185 		check_slabp(cachep, slabp);
3186 
3187 		/* move slabp to correct slabp list: */
3188 		list_del(&slabp->list);
3189 		if (slabp->free == BUFCTL_END)
3190 			list_add(&slabp->list, &l3->slabs_full);
3191 		else
3192 			list_add(&slabp->list, &l3->slabs_partial);
3193 	}
3194 
3195 must_grow:
3196 	l3->free_objects -= ac->avail;
3197 alloc_done:
3198 	spin_unlock(&l3->list_lock);
3199 
3200 	if (unlikely(!ac->avail)) {
3201 		int x;
3202 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3203 
3204 		/* cache_grow can reenable interrupts, then ac could change. */
3205 		ac = cpu_cache_get(cachep);
3206 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3207 			return NULL;
3208 
3209 		if (!ac->avail)		/* objects refilled by interrupt? */
3210 			goto retry;
3211 	}
3212 	ac->touched = 1;
3213 	return ac->entry[--ac->avail];
3214 }
3215 
3216 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3217 						gfp_t flags)
3218 {
3219 	might_sleep_if(flags & __GFP_WAIT);
3220 #if DEBUG
3221 	kmem_flagcheck(cachep, flags);
3222 #endif
3223 }
3224 
3225 #if DEBUG
3226 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3227 				gfp_t flags, void *objp, void *caller)
3228 {
3229 	if (!objp)
3230 		return objp;
3231 	if (cachep->flags & SLAB_POISON) {
3232 #ifdef CONFIG_DEBUG_PAGEALLOC
3233 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3234 			kernel_map_pages(virt_to_page(objp),
3235 					 cachep->buffer_size / PAGE_SIZE, 1);
3236 		else
3237 			check_poison_obj(cachep, objp);
3238 #else
3239 		check_poison_obj(cachep, objp);
3240 #endif
3241 		poison_obj(cachep, objp, POISON_INUSE);
3242 	}
3243 	if (cachep->flags & SLAB_STORE_USER)
3244 		*dbg_userword(cachep, objp) = caller;
3245 
3246 	if (cachep->flags & SLAB_RED_ZONE) {
3247 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3248 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3249 			slab_error(cachep, "double free, or memory outside"
3250 						" object was overwritten");
3251 			printk(KERN_ERR
3252 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3253 				objp, *dbg_redzone1(cachep, objp),
3254 				*dbg_redzone2(cachep, objp));
3255 		}
3256 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3257 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3258 	}
3259 #ifdef CONFIG_DEBUG_SLAB_LEAK
3260 	{
3261 		struct slab *slabp;
3262 		unsigned objnr;
3263 
3264 		slabp = page_get_slab(virt_to_head_page(objp));
3265 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3266 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3267 	}
3268 #endif
3269 	objp += obj_offset(cachep);
3270 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3271 		cachep->ctor(objp);
3272 	if (ARCH_SLAB_MINALIGN &&
3273 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3274 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3275 		       objp, (int)ARCH_SLAB_MINALIGN);
3276 	}
3277 	return objp;
3278 }
3279 #else
3280 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3281 #endif
3282 
3283 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3284 {
3285 	if (cachep == &cache_cache)
3286 		return false;
3287 
3288 	return should_failslab(obj_size(cachep), flags, cachep->flags);
3289 }
3290 
3291 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3292 {
3293 	void *objp;
3294 	struct array_cache *ac;
3295 
3296 	check_irq_off();
3297 
3298 	ac = cpu_cache_get(cachep);
3299 	if (likely(ac->avail)) {
3300 		STATS_INC_ALLOCHIT(cachep);
3301 		ac->touched = 1;
3302 		objp = ac->entry[--ac->avail];
3303 	} else {
3304 		STATS_INC_ALLOCMISS(cachep);
3305 		objp = cache_alloc_refill(cachep, flags);
3306 		/*
3307 		 * the 'ac' may be updated by cache_alloc_refill(),
3308 		 * and kmemleak_erase() requires its correct value.
3309 		 */
3310 		ac = cpu_cache_get(cachep);
3311 	}
3312 	/*
3313 	 * To avoid a false negative, if an object that is in one of the
3314 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3315 	 * treat the array pointers as a reference to the object.
3316 	 */
3317 	if (objp)
3318 		kmemleak_erase(&ac->entry[ac->avail]);
3319 	return objp;
3320 }
3321 
3322 #ifdef CONFIG_NUMA
3323 /*
3324  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3325  *
3326  * If we are in_interrupt, then process context, including cpusets and
3327  * mempolicy, may not apply and should not be used for allocation policy.
3328  */
3329 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3330 {
3331 	int nid_alloc, nid_here;
3332 
3333 	if (in_interrupt() || (flags & __GFP_THISNODE))
3334 		return NULL;
3335 	nid_alloc = nid_here = numa_mem_id();
3336 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3337 		nid_alloc = cpuset_slab_spread_node();
3338 	else if (current->mempolicy)
3339 		nid_alloc = slab_node(current->mempolicy);
3340 	if (nid_alloc != nid_here)
3341 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3342 	return NULL;
3343 }
3344 
3345 /*
3346  * Fallback function if there was no memory available and no objects on a
3347  * certain node and fall back is permitted. First we scan all the
3348  * available nodelists for available objects. If that fails then we
3349  * perform an allocation without specifying a node. This allows the page
3350  * allocator to do its reclaim / fallback magic. We then insert the
3351  * slab into the proper nodelist and then allocate from it.
3352  */
3353 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3354 {
3355 	struct zonelist *zonelist;
3356 	gfp_t local_flags;
3357 	struct zoneref *z;
3358 	struct zone *zone;
3359 	enum zone_type high_zoneidx = gfp_zone(flags);
3360 	void *obj = NULL;
3361 	int nid;
3362 	unsigned int cpuset_mems_cookie;
3363 
3364 	if (flags & __GFP_THISNODE)
3365 		return NULL;
3366 
3367 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3368 
3369 retry_cpuset:
3370 	cpuset_mems_cookie = get_mems_allowed();
3371 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
3372 
3373 retry:
3374 	/*
3375 	 * Look through allowed nodes for objects available
3376 	 * from existing per node queues.
3377 	 */
3378 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3379 		nid = zone_to_nid(zone);
3380 
3381 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3382 			cache->nodelists[nid] &&
3383 			cache->nodelists[nid]->free_objects) {
3384 				obj = ____cache_alloc_node(cache,
3385 					flags | GFP_THISNODE, nid);
3386 				if (obj)
3387 					break;
3388 		}
3389 	}
3390 
3391 	if (!obj) {
3392 		/*
3393 		 * This allocation will be performed within the constraints
3394 		 * of the current cpuset / memory policy requirements.
3395 		 * We may trigger various forms of reclaim on the allowed
3396 		 * set and go into memory reserves if necessary.
3397 		 */
3398 		if (local_flags & __GFP_WAIT)
3399 			local_irq_enable();
3400 		kmem_flagcheck(cache, flags);
3401 		obj = kmem_getpages(cache, local_flags, numa_mem_id());
3402 		if (local_flags & __GFP_WAIT)
3403 			local_irq_disable();
3404 		if (obj) {
3405 			/*
3406 			 * Insert into the appropriate per node queues
3407 			 */
3408 			nid = page_to_nid(virt_to_page(obj));
3409 			if (cache_grow(cache, flags, nid, obj)) {
3410 				obj = ____cache_alloc_node(cache,
3411 					flags | GFP_THISNODE, nid);
3412 				if (!obj)
3413 					/*
3414 					 * Another processor may allocate the
3415 					 * objects in the slab since we are
3416 					 * not holding any locks.
3417 					 */
3418 					goto retry;
3419 			} else {
3420 				/* cache_grow already freed obj */
3421 				obj = NULL;
3422 			}
3423 		}
3424 	}
3425 
3426 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
3427 		goto retry_cpuset;
3428 	return obj;
3429 }
3430 
3431 /*
3432  * A interface to enable slab creation on nodeid
3433  */
3434 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3435 				int nodeid)
3436 {
3437 	struct list_head *entry;
3438 	struct slab *slabp;
3439 	struct kmem_list3 *l3;
3440 	void *obj;
3441 	int x;
3442 
3443 	l3 = cachep->nodelists[nodeid];
3444 	BUG_ON(!l3);
3445 
3446 retry:
3447 	check_irq_off();
3448 	spin_lock(&l3->list_lock);
3449 	entry = l3->slabs_partial.next;
3450 	if (entry == &l3->slabs_partial) {
3451 		l3->free_touched = 1;
3452 		entry = l3->slabs_free.next;
3453 		if (entry == &l3->slabs_free)
3454 			goto must_grow;
3455 	}
3456 
3457 	slabp = list_entry(entry, struct slab, list);
3458 	check_spinlock_acquired_node(cachep, nodeid);
3459 	check_slabp(cachep, slabp);
3460 
3461 	STATS_INC_NODEALLOCS(cachep);
3462 	STATS_INC_ACTIVE(cachep);
3463 	STATS_SET_HIGH(cachep);
3464 
3465 	BUG_ON(slabp->inuse == cachep->num);
3466 
3467 	obj = slab_get_obj(cachep, slabp, nodeid);
3468 	check_slabp(cachep, slabp);
3469 	l3->free_objects--;
3470 	/* move slabp to correct slabp list: */
3471 	list_del(&slabp->list);
3472 
3473 	if (slabp->free == BUFCTL_END)
3474 		list_add(&slabp->list, &l3->slabs_full);
3475 	else
3476 		list_add(&slabp->list, &l3->slabs_partial);
3477 
3478 	spin_unlock(&l3->list_lock);
3479 	goto done;
3480 
3481 must_grow:
3482 	spin_unlock(&l3->list_lock);
3483 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3484 	if (x)
3485 		goto retry;
3486 
3487 	return fallback_alloc(cachep, flags);
3488 
3489 done:
3490 	return obj;
3491 }
3492 
3493 /**
3494  * kmem_cache_alloc_node - Allocate an object on the specified node
3495  * @cachep: The cache to allocate from.
3496  * @flags: See kmalloc().
3497  * @nodeid: node number of the target node.
3498  * @caller: return address of caller, used for debug information
3499  *
3500  * Identical to kmem_cache_alloc but it will allocate memory on the given
3501  * node, which can improve the performance for cpu bound structures.
3502  *
3503  * Fallback to other node is possible if __GFP_THISNODE is not set.
3504  */
3505 static __always_inline void *
3506 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3507 		   void *caller)
3508 {
3509 	unsigned long save_flags;
3510 	void *ptr;
3511 	int slab_node = numa_mem_id();
3512 
3513 	flags &= gfp_allowed_mask;
3514 
3515 	lockdep_trace_alloc(flags);
3516 
3517 	if (slab_should_failslab(cachep, flags))
3518 		return NULL;
3519 
3520 	cache_alloc_debugcheck_before(cachep, flags);
3521 	local_irq_save(save_flags);
3522 
3523 	if (nodeid == NUMA_NO_NODE)
3524 		nodeid = slab_node;
3525 
3526 	if (unlikely(!cachep->nodelists[nodeid])) {
3527 		/* Node not bootstrapped yet */
3528 		ptr = fallback_alloc(cachep, flags);
3529 		goto out;
3530 	}
3531 
3532 	if (nodeid == slab_node) {
3533 		/*
3534 		 * Use the locally cached objects if possible.
3535 		 * However ____cache_alloc does not allow fallback
3536 		 * to other nodes. It may fail while we still have
3537 		 * objects on other nodes available.
3538 		 */
3539 		ptr = ____cache_alloc(cachep, flags);
3540 		if (ptr)
3541 			goto out;
3542 	}
3543 	/* ___cache_alloc_node can fall back to other nodes */
3544 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3545   out:
3546 	local_irq_restore(save_flags);
3547 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3548 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
3549 				 flags);
3550 
3551 	if (likely(ptr))
3552 		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
3553 
3554 	if (unlikely((flags & __GFP_ZERO) && ptr))
3555 		memset(ptr, 0, obj_size(cachep));
3556 
3557 	return ptr;
3558 }
3559 
3560 static __always_inline void *
3561 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3562 {
3563 	void *objp;
3564 
3565 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3566 		objp = alternate_node_alloc(cache, flags);
3567 		if (objp)
3568 			goto out;
3569 	}
3570 	objp = ____cache_alloc(cache, flags);
3571 
3572 	/*
3573 	 * We may just have run out of memory on the local node.
3574 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3575 	 */
3576 	if (!objp)
3577 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3578 
3579   out:
3580 	return objp;
3581 }
3582 #else
3583 
3584 static __always_inline void *
3585 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3586 {
3587 	return ____cache_alloc(cachep, flags);
3588 }
3589 
3590 #endif /* CONFIG_NUMA */
3591 
3592 static __always_inline void *
3593 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3594 {
3595 	unsigned long save_flags;
3596 	void *objp;
3597 
3598 	flags &= gfp_allowed_mask;
3599 
3600 	lockdep_trace_alloc(flags);
3601 
3602 	if (slab_should_failslab(cachep, flags))
3603 		return NULL;
3604 
3605 	cache_alloc_debugcheck_before(cachep, flags);
3606 	local_irq_save(save_flags);
3607 	objp = __do_cache_alloc(cachep, flags);
3608 	local_irq_restore(save_flags);
3609 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3610 	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
3611 				 flags);
3612 	prefetchw(objp);
3613 
3614 	if (likely(objp))
3615 		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
3616 
3617 	if (unlikely((flags & __GFP_ZERO) && objp))
3618 		memset(objp, 0, obj_size(cachep));
3619 
3620 	return objp;
3621 }
3622 
3623 /*
3624  * Caller needs to acquire correct kmem_list's list_lock
3625  */
3626 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3627 		       int node)
3628 {
3629 	int i;
3630 	struct kmem_list3 *l3;
3631 
3632 	for (i = 0; i < nr_objects; i++) {
3633 		void *objp = objpp[i];
3634 		struct slab *slabp;
3635 
3636 		slabp = virt_to_slab(objp);
3637 		l3 = cachep->nodelists[node];
3638 		list_del(&slabp->list);
3639 		check_spinlock_acquired_node(cachep, node);
3640 		check_slabp(cachep, slabp);
3641 		slab_put_obj(cachep, slabp, objp, node);
3642 		STATS_DEC_ACTIVE(cachep);
3643 		l3->free_objects++;
3644 		check_slabp(cachep, slabp);
3645 
3646 		/* fixup slab chains */
3647 		if (slabp->inuse == 0) {
3648 			if (l3->free_objects > l3->free_limit) {
3649 				l3->free_objects -= cachep->num;
3650 				/* No need to drop any previously held
3651 				 * lock here, even if we have a off-slab slab
3652 				 * descriptor it is guaranteed to come from
3653 				 * a different cache, refer to comments before
3654 				 * alloc_slabmgmt.
3655 				 */
3656 				slab_destroy(cachep, slabp);
3657 			} else {
3658 				list_add(&slabp->list, &l3->slabs_free);
3659 			}
3660 		} else {
3661 			/* Unconditionally move a slab to the end of the
3662 			 * partial list on free - maximum time for the
3663 			 * other objects to be freed, too.
3664 			 */
3665 			list_add_tail(&slabp->list, &l3->slabs_partial);
3666 		}
3667 	}
3668 }
3669 
3670 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3671 {
3672 	int batchcount;
3673 	struct kmem_list3 *l3;
3674 	int node = numa_mem_id();
3675 
3676 	batchcount = ac->batchcount;
3677 #if DEBUG
3678 	BUG_ON(!batchcount || batchcount > ac->avail);
3679 #endif
3680 	check_irq_off();
3681 	l3 = cachep->nodelists[node];
3682 	spin_lock(&l3->list_lock);
3683 	if (l3->shared) {
3684 		struct array_cache *shared_array = l3->shared;
3685 		int max = shared_array->limit - shared_array->avail;
3686 		if (max) {
3687 			if (batchcount > max)
3688 				batchcount = max;
3689 			memcpy(&(shared_array->entry[shared_array->avail]),
3690 			       ac->entry, sizeof(void *) * batchcount);
3691 			shared_array->avail += batchcount;
3692 			goto free_done;
3693 		}
3694 	}
3695 
3696 	free_block(cachep, ac->entry, batchcount, node);
3697 free_done:
3698 #if STATS
3699 	{
3700 		int i = 0;
3701 		struct list_head *p;
3702 
3703 		p = l3->slabs_free.next;
3704 		while (p != &(l3->slabs_free)) {
3705 			struct slab *slabp;
3706 
3707 			slabp = list_entry(p, struct slab, list);
3708 			BUG_ON(slabp->inuse);
3709 
3710 			i++;
3711 			p = p->next;
3712 		}
3713 		STATS_SET_FREEABLE(cachep, i);
3714 	}
3715 #endif
3716 	spin_unlock(&l3->list_lock);
3717 	ac->avail -= batchcount;
3718 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3719 }
3720 
3721 /*
3722  * Release an obj back to its cache. If the obj has a constructed state, it must
3723  * be in this state _before_ it is released.  Called with disabled ints.
3724  */
3725 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3726     void *caller)
3727 {
3728 	struct array_cache *ac = cpu_cache_get(cachep);
3729 
3730 	check_irq_off();
3731 	kmemleak_free_recursive(objp, cachep->flags);
3732 	objp = cache_free_debugcheck(cachep, objp, caller);
3733 
3734 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3735 
3736 	/*
3737 	 * Skip calling cache_free_alien() when the platform is not numa.
3738 	 * This will avoid cache misses that happen while accessing slabp (which
3739 	 * is per page memory  reference) to get nodeid. Instead use a global
3740 	 * variable to skip the call, which is mostly likely to be present in
3741 	 * the cache.
3742 	 */
3743 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3744 		return;
3745 
3746 	if (likely(ac->avail < ac->limit)) {
3747 		STATS_INC_FREEHIT(cachep);
3748 	} else {
3749 		STATS_INC_FREEMISS(cachep);
3750 		cache_flusharray(cachep, ac);
3751 	}
3752 
3753 	ac->entry[ac->avail++] = objp;
3754 }
3755 
3756 /**
3757  * kmem_cache_alloc - Allocate an object
3758  * @cachep: The cache to allocate from.
3759  * @flags: See kmalloc().
3760  *
3761  * Allocate an object from this cache.  The flags are only relevant
3762  * if the cache has no available objects.
3763  */
3764 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3765 {
3766 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3767 
3768 	trace_kmem_cache_alloc(_RET_IP_, ret,
3769 			       obj_size(cachep), cachep->buffer_size, flags);
3770 
3771 	return ret;
3772 }
3773 EXPORT_SYMBOL(kmem_cache_alloc);
3774 
3775 #ifdef CONFIG_TRACING
3776 void *
3777 kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
3778 {
3779 	void *ret;
3780 
3781 	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3782 
3783 	trace_kmalloc(_RET_IP_, ret,
3784 		      size, slab_buffer_size(cachep), flags);
3785 	return ret;
3786 }
3787 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3788 #endif
3789 
3790 #ifdef CONFIG_NUMA
3791 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3792 {
3793 	void *ret = __cache_alloc_node(cachep, flags, nodeid,
3794 				       __builtin_return_address(0));
3795 
3796 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3797 				    obj_size(cachep), cachep->buffer_size,
3798 				    flags, nodeid);
3799 
3800 	return ret;
3801 }
3802 EXPORT_SYMBOL(kmem_cache_alloc_node);
3803 
3804 #ifdef CONFIG_TRACING
3805 void *kmem_cache_alloc_node_trace(size_t size,
3806 				  struct kmem_cache *cachep,
3807 				  gfp_t flags,
3808 				  int nodeid)
3809 {
3810 	void *ret;
3811 
3812 	ret = __cache_alloc_node(cachep, flags, nodeid,
3813 				  __builtin_return_address(0));
3814 	trace_kmalloc_node(_RET_IP_, ret,
3815 			   size, slab_buffer_size(cachep),
3816 			   flags, nodeid);
3817 	return ret;
3818 }
3819 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3820 #endif
3821 
3822 static __always_inline void *
3823 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3824 {
3825 	struct kmem_cache *cachep;
3826 
3827 	cachep = kmem_find_general_cachep(size, flags);
3828 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3829 		return cachep;
3830 	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
3831 }
3832 
3833 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3834 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3835 {
3836 	return __do_kmalloc_node(size, flags, node,
3837 			__builtin_return_address(0));
3838 }
3839 EXPORT_SYMBOL(__kmalloc_node);
3840 
3841 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3842 		int node, unsigned long caller)
3843 {
3844 	return __do_kmalloc_node(size, flags, node, (void *)caller);
3845 }
3846 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3847 #else
3848 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3849 {
3850 	return __do_kmalloc_node(size, flags, node, NULL);
3851 }
3852 EXPORT_SYMBOL(__kmalloc_node);
3853 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3854 #endif /* CONFIG_NUMA */
3855 
3856 /**
3857  * __do_kmalloc - allocate memory
3858  * @size: how many bytes of memory are required.
3859  * @flags: the type of memory to allocate (see kmalloc).
3860  * @caller: function caller for debug tracking of the caller
3861  */
3862 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3863 					  void *caller)
3864 {
3865 	struct kmem_cache *cachep;
3866 	void *ret;
3867 
3868 	/* If you want to save a few bytes .text space: replace
3869 	 * __ with kmem_.
3870 	 * Then kmalloc uses the uninlined functions instead of the inline
3871 	 * functions.
3872 	 */
3873 	cachep = __find_general_cachep(size, flags);
3874 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3875 		return cachep;
3876 	ret = __cache_alloc(cachep, flags, caller);
3877 
3878 	trace_kmalloc((unsigned long) caller, ret,
3879 		      size, cachep->buffer_size, flags);
3880 
3881 	return ret;
3882 }
3883 
3884 
3885 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3886 void *__kmalloc(size_t size, gfp_t flags)
3887 {
3888 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3889 }
3890 EXPORT_SYMBOL(__kmalloc);
3891 
3892 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3893 {
3894 	return __do_kmalloc(size, flags, (void *)caller);
3895 }
3896 EXPORT_SYMBOL(__kmalloc_track_caller);
3897 
3898 #else
3899 void *__kmalloc(size_t size, gfp_t flags)
3900 {
3901 	return __do_kmalloc(size, flags, NULL);
3902 }
3903 EXPORT_SYMBOL(__kmalloc);
3904 #endif
3905 
3906 /**
3907  * kmem_cache_free - Deallocate an object
3908  * @cachep: The cache the allocation was from.
3909  * @objp: The previously allocated object.
3910  *
3911  * Free an object which was previously allocated from this
3912  * cache.
3913  */
3914 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3915 {
3916 	unsigned long flags;
3917 
3918 	local_irq_save(flags);
3919 	debug_check_no_locks_freed(objp, obj_size(cachep));
3920 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3921 		debug_check_no_obj_freed(objp, obj_size(cachep));
3922 	__cache_free(cachep, objp, __builtin_return_address(0));
3923 	local_irq_restore(flags);
3924 
3925 	trace_kmem_cache_free(_RET_IP_, objp);
3926 }
3927 EXPORT_SYMBOL(kmem_cache_free);
3928 
3929 /**
3930  * kfree - free previously allocated memory
3931  * @objp: pointer returned by kmalloc.
3932  *
3933  * If @objp is NULL, no operation is performed.
3934  *
3935  * Don't free memory not originally allocated by kmalloc()
3936  * or you will run into trouble.
3937  */
3938 void kfree(const void *objp)
3939 {
3940 	struct kmem_cache *c;
3941 	unsigned long flags;
3942 
3943 	trace_kfree(_RET_IP_, objp);
3944 
3945 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3946 		return;
3947 	local_irq_save(flags);
3948 	kfree_debugcheck(objp);
3949 	c = virt_to_cache(objp);
3950 	debug_check_no_locks_freed(objp, obj_size(c));
3951 	debug_check_no_obj_freed(objp, obj_size(c));
3952 	__cache_free(c, (void *)objp, __builtin_return_address(0));
3953 	local_irq_restore(flags);
3954 }
3955 EXPORT_SYMBOL(kfree);
3956 
3957 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3958 {
3959 	return obj_size(cachep);
3960 }
3961 EXPORT_SYMBOL(kmem_cache_size);
3962 
3963 /*
3964  * This initializes kmem_list3 or resizes various caches for all nodes.
3965  */
3966 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3967 {
3968 	int node;
3969 	struct kmem_list3 *l3;
3970 	struct array_cache *new_shared;
3971 	struct array_cache **new_alien = NULL;
3972 
3973 	for_each_online_node(node) {
3974 
3975                 if (use_alien_caches) {
3976                         new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3977                         if (!new_alien)
3978                                 goto fail;
3979                 }
3980 
3981 		new_shared = NULL;
3982 		if (cachep->shared) {
3983 			new_shared = alloc_arraycache(node,
3984 				cachep->shared*cachep->batchcount,
3985 					0xbaadf00d, gfp);
3986 			if (!new_shared) {
3987 				free_alien_cache(new_alien);
3988 				goto fail;
3989 			}
3990 		}
3991 
3992 		l3 = cachep->nodelists[node];
3993 		if (l3) {
3994 			struct array_cache *shared = l3->shared;
3995 
3996 			spin_lock_irq(&l3->list_lock);
3997 
3998 			if (shared)
3999 				free_block(cachep, shared->entry,
4000 						shared->avail, node);
4001 
4002 			l3->shared = new_shared;
4003 			if (!l3->alien) {
4004 				l3->alien = new_alien;
4005 				new_alien = NULL;
4006 			}
4007 			l3->free_limit = (1 + nr_cpus_node(node)) *
4008 					cachep->batchcount + cachep->num;
4009 			spin_unlock_irq(&l3->list_lock);
4010 			kfree(shared);
4011 			free_alien_cache(new_alien);
4012 			continue;
4013 		}
4014 		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
4015 		if (!l3) {
4016 			free_alien_cache(new_alien);
4017 			kfree(new_shared);
4018 			goto fail;
4019 		}
4020 
4021 		kmem_list3_init(l3);
4022 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
4023 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
4024 		l3->shared = new_shared;
4025 		l3->alien = new_alien;
4026 		l3->free_limit = (1 + nr_cpus_node(node)) *
4027 					cachep->batchcount + cachep->num;
4028 		cachep->nodelists[node] = l3;
4029 	}
4030 	return 0;
4031 
4032 fail:
4033 	if (!cachep->next.next) {
4034 		/* Cache is not active yet. Roll back what we did */
4035 		node--;
4036 		while (node >= 0) {
4037 			if (cachep->nodelists[node]) {
4038 				l3 = cachep->nodelists[node];
4039 
4040 				kfree(l3->shared);
4041 				free_alien_cache(l3->alien);
4042 				kfree(l3);
4043 				cachep->nodelists[node] = NULL;
4044 			}
4045 			node--;
4046 		}
4047 	}
4048 	return -ENOMEM;
4049 }
4050 
4051 struct ccupdate_struct {
4052 	struct kmem_cache *cachep;
4053 	struct array_cache *new[0];
4054 };
4055 
4056 static void do_ccupdate_local(void *info)
4057 {
4058 	struct ccupdate_struct *new = info;
4059 	struct array_cache *old;
4060 
4061 	check_irq_off();
4062 	old = cpu_cache_get(new->cachep);
4063 
4064 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
4065 	new->new[smp_processor_id()] = old;
4066 }
4067 
4068 /* Always called with the cache_chain_mutex held */
4069 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4070 				int batchcount, int shared, gfp_t gfp)
4071 {
4072 	struct ccupdate_struct *new;
4073 	int i;
4074 
4075 	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
4076 		      gfp);
4077 	if (!new)
4078 		return -ENOMEM;
4079 
4080 	for_each_online_cpu(i) {
4081 		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
4082 						batchcount, gfp);
4083 		if (!new->new[i]) {
4084 			for (i--; i >= 0; i--)
4085 				kfree(new->new[i]);
4086 			kfree(new);
4087 			return -ENOMEM;
4088 		}
4089 	}
4090 	new->cachep = cachep;
4091 
4092 	on_each_cpu(do_ccupdate_local, (void *)new, 1);
4093 
4094 	check_irq_on();
4095 	cachep->batchcount = batchcount;
4096 	cachep->limit = limit;
4097 	cachep->shared = shared;
4098 
4099 	for_each_online_cpu(i) {
4100 		struct array_cache *ccold = new->new[i];
4101 		if (!ccold)
4102 			continue;
4103 		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4104 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4105 		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4106 		kfree(ccold);
4107 	}
4108 	kfree(new);
4109 	return alloc_kmemlist(cachep, gfp);
4110 }
4111 
4112 /* Called with cache_chain_mutex held always */
4113 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4114 {
4115 	int err;
4116 	int limit, shared;
4117 
4118 	/*
4119 	 * The head array serves three purposes:
4120 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4121 	 * - reduce the number of spinlock operations.
4122 	 * - reduce the number of linked list operations on the slab and
4123 	 *   bufctl chains: array operations are cheaper.
4124 	 * The numbers are guessed, we should auto-tune as described by
4125 	 * Bonwick.
4126 	 */
4127 	if (cachep->buffer_size > 131072)
4128 		limit = 1;
4129 	else if (cachep->buffer_size > PAGE_SIZE)
4130 		limit = 8;
4131 	else if (cachep->buffer_size > 1024)
4132 		limit = 24;
4133 	else if (cachep->buffer_size > 256)
4134 		limit = 54;
4135 	else
4136 		limit = 120;
4137 
4138 	/*
4139 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
4140 	 * allocation behaviour: Most allocs on one cpu, most free operations
4141 	 * on another cpu. For these cases, an efficient object passing between
4142 	 * cpus is necessary. This is provided by a shared array. The array
4143 	 * replaces Bonwick's magazine layer.
4144 	 * On uniprocessor, it's functionally equivalent (but less efficient)
4145 	 * to a larger limit. Thus disabled by default.
4146 	 */
4147 	shared = 0;
4148 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4149 		shared = 8;
4150 
4151 #if DEBUG
4152 	/*
4153 	 * With debugging enabled, large batchcount lead to excessively long
4154 	 * periods with disabled local interrupts. Limit the batchcount
4155 	 */
4156 	if (limit > 32)
4157 		limit = 32;
4158 #endif
4159 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
4160 	if (err)
4161 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4162 		       cachep->name, -err);
4163 	return err;
4164 }
4165 
4166 /*
4167  * Drain an array if it contains any elements taking the l3 lock only if
4168  * necessary. Note that the l3 listlock also protects the array_cache
4169  * if drain_array() is used on the shared array.
4170  */
4171 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4172 			 struct array_cache *ac, int force, int node)
4173 {
4174 	int tofree;
4175 
4176 	if (!ac || !ac->avail)
4177 		return;
4178 	if (ac->touched && !force) {
4179 		ac->touched = 0;
4180 	} else {
4181 		spin_lock_irq(&l3->list_lock);
4182 		if (ac->avail) {
4183 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4184 			if (tofree > ac->avail)
4185 				tofree = (ac->avail + 1) / 2;
4186 			free_block(cachep, ac->entry, tofree, node);
4187 			ac->avail -= tofree;
4188 			memmove(ac->entry, &(ac->entry[tofree]),
4189 				sizeof(void *) * ac->avail);
4190 		}
4191 		spin_unlock_irq(&l3->list_lock);
4192 	}
4193 }
4194 
4195 /**
4196  * cache_reap - Reclaim memory from caches.
4197  * @w: work descriptor
4198  *
4199  * Called from workqueue/eventd every few seconds.
4200  * Purpose:
4201  * - clear the per-cpu caches for this CPU.
4202  * - return freeable pages to the main free memory pool.
4203  *
4204  * If we cannot acquire the cache chain mutex then just give up - we'll try
4205  * again on the next iteration.
4206  */
4207 static void cache_reap(struct work_struct *w)
4208 {
4209 	struct kmem_cache *searchp;
4210 	struct kmem_list3 *l3;
4211 	int node = numa_mem_id();
4212 	struct delayed_work *work = to_delayed_work(w);
4213 
4214 	if (!mutex_trylock(&cache_chain_mutex))
4215 		/* Give up. Setup the next iteration. */
4216 		goto out;
4217 
4218 	list_for_each_entry(searchp, &cache_chain, next) {
4219 		check_irq_on();
4220 
4221 		/*
4222 		 * We only take the l3 lock if absolutely necessary and we
4223 		 * have established with reasonable certainty that
4224 		 * we can do some work if the lock was obtained.
4225 		 */
4226 		l3 = searchp->nodelists[node];
4227 
4228 		reap_alien(searchp, l3);
4229 
4230 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4231 
4232 		/*
4233 		 * These are racy checks but it does not matter
4234 		 * if we skip one check or scan twice.
4235 		 */
4236 		if (time_after(l3->next_reap, jiffies))
4237 			goto next;
4238 
4239 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4240 
4241 		drain_array(searchp, l3, l3->shared, 0, node);
4242 
4243 		if (l3->free_touched)
4244 			l3->free_touched = 0;
4245 		else {
4246 			int freed;
4247 
4248 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4249 				5 * searchp->num - 1) / (5 * searchp->num));
4250 			STATS_ADD_REAPED(searchp, freed);
4251 		}
4252 next:
4253 		cond_resched();
4254 	}
4255 	check_irq_on();
4256 	mutex_unlock(&cache_chain_mutex);
4257 	next_reap_node();
4258 out:
4259 	/* Set up the next iteration */
4260 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4261 }
4262 
4263 #ifdef CONFIG_SLABINFO
4264 
4265 static void print_slabinfo_header(struct seq_file *m)
4266 {
4267 	/*
4268 	 * Output format version, so at least we can change it
4269 	 * without _too_ many complaints.
4270 	 */
4271 #if STATS
4272 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4273 #else
4274 	seq_puts(m, "slabinfo - version: 2.1\n");
4275 #endif
4276 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4277 		 "<objperslab> <pagesperslab>");
4278 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4279 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4280 #if STATS
4281 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4282 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4283 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4284 #endif
4285 	seq_putc(m, '\n');
4286 }
4287 
4288 static void *s_start(struct seq_file *m, loff_t *pos)
4289 {
4290 	loff_t n = *pos;
4291 
4292 	mutex_lock(&cache_chain_mutex);
4293 	if (!n)
4294 		print_slabinfo_header(m);
4295 
4296 	return seq_list_start(&cache_chain, *pos);
4297 }
4298 
4299 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4300 {
4301 	return seq_list_next(p, &cache_chain, pos);
4302 }
4303 
4304 static void s_stop(struct seq_file *m, void *p)
4305 {
4306 	mutex_unlock(&cache_chain_mutex);
4307 }
4308 
4309 static int s_show(struct seq_file *m, void *p)
4310 {
4311 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4312 	struct slab *slabp;
4313 	unsigned long active_objs;
4314 	unsigned long num_objs;
4315 	unsigned long active_slabs = 0;
4316 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4317 	const char *name;
4318 	char *error = NULL;
4319 	int node;
4320 	struct kmem_list3 *l3;
4321 
4322 	active_objs = 0;
4323 	num_slabs = 0;
4324 	for_each_online_node(node) {
4325 		l3 = cachep->nodelists[node];
4326 		if (!l3)
4327 			continue;
4328 
4329 		check_irq_on();
4330 		spin_lock_irq(&l3->list_lock);
4331 
4332 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4333 			if (slabp->inuse != cachep->num && !error)
4334 				error = "slabs_full accounting error";
4335 			active_objs += cachep->num;
4336 			active_slabs++;
4337 		}
4338 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4339 			if (slabp->inuse == cachep->num && !error)
4340 				error = "slabs_partial inuse accounting error";
4341 			if (!slabp->inuse && !error)
4342 				error = "slabs_partial/inuse accounting error";
4343 			active_objs += slabp->inuse;
4344 			active_slabs++;
4345 		}
4346 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4347 			if (slabp->inuse && !error)
4348 				error = "slabs_free/inuse accounting error";
4349 			num_slabs++;
4350 		}
4351 		free_objects += l3->free_objects;
4352 		if (l3->shared)
4353 			shared_avail += l3->shared->avail;
4354 
4355 		spin_unlock_irq(&l3->list_lock);
4356 	}
4357 	num_slabs += active_slabs;
4358 	num_objs = num_slabs * cachep->num;
4359 	if (num_objs - active_objs != free_objects && !error)
4360 		error = "free_objects accounting error";
4361 
4362 	name = cachep->name;
4363 	if (error)
4364 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4365 
4366 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4367 		   name, active_objs, num_objs, cachep->buffer_size,
4368 		   cachep->num, (1 << cachep->gfporder));
4369 	seq_printf(m, " : tunables %4u %4u %4u",
4370 		   cachep->limit, cachep->batchcount, cachep->shared);
4371 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4372 		   active_slabs, num_slabs, shared_avail);
4373 #if STATS
4374 	{			/* list3 stats */
4375 		unsigned long high = cachep->high_mark;
4376 		unsigned long allocs = cachep->num_allocations;
4377 		unsigned long grown = cachep->grown;
4378 		unsigned long reaped = cachep->reaped;
4379 		unsigned long errors = cachep->errors;
4380 		unsigned long max_freeable = cachep->max_freeable;
4381 		unsigned long node_allocs = cachep->node_allocs;
4382 		unsigned long node_frees = cachep->node_frees;
4383 		unsigned long overflows = cachep->node_overflow;
4384 
4385 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4386 			   "%4lu %4lu %4lu %4lu %4lu",
4387 			   allocs, high, grown,
4388 			   reaped, errors, max_freeable, node_allocs,
4389 			   node_frees, overflows);
4390 	}
4391 	/* cpu stats */
4392 	{
4393 		unsigned long allochit = atomic_read(&cachep->allochit);
4394 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4395 		unsigned long freehit = atomic_read(&cachep->freehit);
4396 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4397 
4398 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4399 			   allochit, allocmiss, freehit, freemiss);
4400 	}
4401 #endif
4402 	seq_putc(m, '\n');
4403 	return 0;
4404 }
4405 
4406 /*
4407  * slabinfo_op - iterator that generates /proc/slabinfo
4408  *
4409  * Output layout:
4410  * cache-name
4411  * num-active-objs
4412  * total-objs
4413  * object size
4414  * num-active-slabs
4415  * total-slabs
4416  * num-pages-per-slab
4417  * + further values on SMP and with statistics enabled
4418  */
4419 
4420 static const struct seq_operations slabinfo_op = {
4421 	.start = s_start,
4422 	.next = s_next,
4423 	.stop = s_stop,
4424 	.show = s_show,
4425 };
4426 
4427 #define MAX_SLABINFO_WRITE 128
4428 /**
4429  * slabinfo_write - Tuning for the slab allocator
4430  * @file: unused
4431  * @buffer: user buffer
4432  * @count: data length
4433  * @ppos: unused
4434  */
4435 static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4436 		       size_t count, loff_t *ppos)
4437 {
4438 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4439 	int limit, batchcount, shared, res;
4440 	struct kmem_cache *cachep;
4441 
4442 	if (count > MAX_SLABINFO_WRITE)
4443 		return -EINVAL;
4444 	if (copy_from_user(&kbuf, buffer, count))
4445 		return -EFAULT;
4446 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4447 
4448 	tmp = strchr(kbuf, ' ');
4449 	if (!tmp)
4450 		return -EINVAL;
4451 	*tmp = '\0';
4452 	tmp++;
4453 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4454 		return -EINVAL;
4455 
4456 	/* Find the cache in the chain of caches. */
4457 	mutex_lock(&cache_chain_mutex);
4458 	res = -EINVAL;
4459 	list_for_each_entry(cachep, &cache_chain, next) {
4460 		if (!strcmp(cachep->name, kbuf)) {
4461 			if (limit < 1 || batchcount < 1 ||
4462 					batchcount > limit || shared < 0) {
4463 				res = 0;
4464 			} else {
4465 				res = do_tune_cpucache(cachep, limit,
4466 						       batchcount, shared,
4467 						       GFP_KERNEL);
4468 			}
4469 			break;
4470 		}
4471 	}
4472 	mutex_unlock(&cache_chain_mutex);
4473 	if (res >= 0)
4474 		res = count;
4475 	return res;
4476 }
4477 
4478 static int slabinfo_open(struct inode *inode, struct file *file)
4479 {
4480 	return seq_open(file, &slabinfo_op);
4481 }
4482 
4483 static const struct file_operations proc_slabinfo_operations = {
4484 	.open		= slabinfo_open,
4485 	.read		= seq_read,
4486 	.write		= slabinfo_write,
4487 	.llseek		= seq_lseek,
4488 	.release	= seq_release,
4489 };
4490 
4491 #ifdef CONFIG_DEBUG_SLAB_LEAK
4492 
4493 static void *leaks_start(struct seq_file *m, loff_t *pos)
4494 {
4495 	mutex_lock(&cache_chain_mutex);
4496 	return seq_list_start(&cache_chain, *pos);
4497 }
4498 
4499 static inline int add_caller(unsigned long *n, unsigned long v)
4500 {
4501 	unsigned long *p;
4502 	int l;
4503 	if (!v)
4504 		return 1;
4505 	l = n[1];
4506 	p = n + 2;
4507 	while (l) {
4508 		int i = l/2;
4509 		unsigned long *q = p + 2 * i;
4510 		if (*q == v) {
4511 			q[1]++;
4512 			return 1;
4513 		}
4514 		if (*q > v) {
4515 			l = i;
4516 		} else {
4517 			p = q + 2;
4518 			l -= i + 1;
4519 		}
4520 	}
4521 	if (++n[1] == n[0])
4522 		return 0;
4523 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4524 	p[0] = v;
4525 	p[1] = 1;
4526 	return 1;
4527 }
4528 
4529 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4530 {
4531 	void *p;
4532 	int i;
4533 	if (n[0] == n[1])
4534 		return;
4535 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4536 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4537 			continue;
4538 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4539 			return;
4540 	}
4541 }
4542 
4543 static void show_symbol(struct seq_file *m, unsigned long address)
4544 {
4545 #ifdef CONFIG_KALLSYMS
4546 	unsigned long offset, size;
4547 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4548 
4549 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4550 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4551 		if (modname[0])
4552 			seq_printf(m, " [%s]", modname);
4553 		return;
4554 	}
4555 #endif
4556 	seq_printf(m, "%p", (void *)address);
4557 }
4558 
4559 static int leaks_show(struct seq_file *m, void *p)
4560 {
4561 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4562 	struct slab *slabp;
4563 	struct kmem_list3 *l3;
4564 	const char *name;
4565 	unsigned long *n = m->private;
4566 	int node;
4567 	int i;
4568 
4569 	if (!(cachep->flags & SLAB_STORE_USER))
4570 		return 0;
4571 	if (!(cachep->flags & SLAB_RED_ZONE))
4572 		return 0;
4573 
4574 	/* OK, we can do it */
4575 
4576 	n[1] = 0;
4577 
4578 	for_each_online_node(node) {
4579 		l3 = cachep->nodelists[node];
4580 		if (!l3)
4581 			continue;
4582 
4583 		check_irq_on();
4584 		spin_lock_irq(&l3->list_lock);
4585 
4586 		list_for_each_entry(slabp, &l3->slabs_full, list)
4587 			handle_slab(n, cachep, slabp);
4588 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4589 			handle_slab(n, cachep, slabp);
4590 		spin_unlock_irq(&l3->list_lock);
4591 	}
4592 	name = cachep->name;
4593 	if (n[0] == n[1]) {
4594 		/* Increase the buffer size */
4595 		mutex_unlock(&cache_chain_mutex);
4596 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4597 		if (!m->private) {
4598 			/* Too bad, we are really out */
4599 			m->private = n;
4600 			mutex_lock(&cache_chain_mutex);
4601 			return -ENOMEM;
4602 		}
4603 		*(unsigned long *)m->private = n[0] * 2;
4604 		kfree(n);
4605 		mutex_lock(&cache_chain_mutex);
4606 		/* Now make sure this entry will be retried */
4607 		m->count = m->size;
4608 		return 0;
4609 	}
4610 	for (i = 0; i < n[1]; i++) {
4611 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4612 		show_symbol(m, n[2*i+2]);
4613 		seq_putc(m, '\n');
4614 	}
4615 
4616 	return 0;
4617 }
4618 
4619 static const struct seq_operations slabstats_op = {
4620 	.start = leaks_start,
4621 	.next = s_next,
4622 	.stop = s_stop,
4623 	.show = leaks_show,
4624 };
4625 
4626 static int slabstats_open(struct inode *inode, struct file *file)
4627 {
4628 	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4629 	int ret = -ENOMEM;
4630 	if (n) {
4631 		ret = seq_open(file, &slabstats_op);
4632 		if (!ret) {
4633 			struct seq_file *m = file->private_data;
4634 			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4635 			m->private = n;
4636 			n = NULL;
4637 		}
4638 		kfree(n);
4639 	}
4640 	return ret;
4641 }
4642 
4643 static const struct file_operations proc_slabstats_operations = {
4644 	.open		= slabstats_open,
4645 	.read		= seq_read,
4646 	.llseek		= seq_lseek,
4647 	.release	= seq_release_private,
4648 };
4649 #endif
4650 
4651 static int __init slab_proc_init(void)
4652 {
4653 	proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
4654 #ifdef CONFIG_DEBUG_SLAB_LEAK
4655 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4656 #endif
4657 	return 0;
4658 }
4659 module_init(slab_proc_init);
4660 #endif
4661 
4662 /**
4663  * ksize - get the actual amount of memory allocated for a given object
4664  * @objp: Pointer to the object
4665  *
4666  * kmalloc may internally round up allocations and return more memory
4667  * than requested. ksize() can be used to determine the actual amount of
4668  * memory allocated. The caller may use this additional memory, even though
4669  * a smaller amount of memory was initially specified with the kmalloc call.
4670  * The caller must guarantee that objp points to a valid object previously
4671  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4672  * must not be freed during the duration of the call.
4673  */
4674 size_t ksize(const void *objp)
4675 {
4676 	BUG_ON(!objp);
4677 	if (unlikely(objp == ZERO_SIZE_PTR))
4678 		return 0;
4679 
4680 	return obj_size(virt_to_cache(objp));
4681 }
4682 EXPORT_SYMBOL(ksize);
4683