xref: /openbmc/linux/mm/slab.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise objects come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array; most allocs
48  * and frees go through that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back to the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The mutex is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
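/*
 * Illustrative usage sketch: a typical client of this allocator, assuming a
 * hypothetical struct foo and foo_cache (names are examples only).  The
 * kmem_cache_create() arguments are (name, size, align, flags, ctor),
 * matching the ctor signature void (*ctor)(void *) used below.
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */
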
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/seq_file.h>
99 #include	<linux/notifier.h>
100 #include	<linux/kallsyms.h>
101 #include	<linux/cpu.h>
102 #include	<linux/sysctl.h>
103 #include	<linux/module.h>
104 #include	<linux/rcupdate.h>
105 #include	<linux/string.h>
106 #include	<linux/uaccess.h>
107 #include	<linux/nodemask.h>
108 #include	<linux/mempolicy.h>
109 #include	<linux/mutex.h>
110 #include	<linux/fault-inject.h>
111 #include	<linux/rtmutex.h>
112 #include	<linux/reciprocal_div.h>
113 #include	<linux/debugobjects.h>
114 
115 #include	<asm/cacheflush.h>
116 #include	<asm/tlbflush.h>
117 #include	<asm/page.h>
118 
119 /*
120  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
121  *		  0 for faster, smaller code (especially in the critical paths).
122  *
123  * STATS	- 1 to collect stats for /proc/slabinfo.
124  *		  0 for faster, smaller code (especially in the critical paths).
125  *
126  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
127  */
128 
129 #ifdef CONFIG_DEBUG_SLAB
130 #define	DEBUG		1
131 #define	STATS		1
132 #define	FORCED_DEBUG	1
133 #else
134 #define	DEBUG		0
135 #define	STATS		0
136 #define	FORCED_DEBUG	0
137 #endif
138 
139 /* Shouldn't this be in a header file somewhere? */
140 #define	BYTES_PER_WORD		sizeof(void *)
141 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
142 
143 #ifndef ARCH_KMALLOC_MINALIGN
144 /*
145  * Enforce a minimum alignment for the kmalloc caches.
146  * Usually, the kmalloc caches are cache_line_size() aligned, except when
147  * DEBUG and FORCED_DEBUG are enabled, in which case they are BYTES_PER_WORD aligned.
148  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
149  * alignment larger than the alignment of a 64-bit integer.
150  * ARCH_KMALLOC_MINALIGN allows that.
151  * Note that increasing this value may disable some debug features.
152  */
153 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
154 #endif
155 
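/*
 * Illustrative sketch: an arch wanting DMA into kmalloc memory could raise
 * the minimum alignment, for example (hypothetical override, normally done
 * in the arch's own headers rather than here):
 *
 *	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 */
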
156 #ifndef ARCH_SLAB_MINALIGN
157 /*
158  * Enforce a minimum alignment for all caches.
159  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
160  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
161  * If possible, do not enable this for CONFIG_DEBUG_SLAB, as it disables
162  * some debug features.
163  */
164 #define ARCH_SLAB_MINALIGN 0
165 #endif
166 
167 #ifndef ARCH_KMALLOC_FLAGS
168 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
169 #endif
170 
171 /* Legal flag mask for kmem_cache_create(). */
172 #if DEBUG
173 # define CREATE_MASK	(SLAB_RED_ZONE | \
174 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
175 			 SLAB_CACHE_DMA | \
176 			 SLAB_STORE_USER | \
177 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
178 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
179 			 SLAB_DEBUG_OBJECTS)
180 #else
181 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
182 			 SLAB_CACHE_DMA | \
183 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
184 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
185 			 SLAB_DEBUG_OBJECTS)
186 #endif
187 
188 /*
189  * kmem_bufctl_t:
190  *
191  * Bufctls are used for linking objs within a slab into a free list
192  * of object indices (offsets).
193  *
194  * This implementation relies on "struct page" for locating the cache &
195  * slab an object belongs to.
196  * This allows the bufctl structure to be small (one int), but limits
197  * the number of objects a slab (not a cache) can contain when off-slab
198  * bufctls are used. The limit is the size of the largest general cache
199  * that does not use off-slab slabs.
200  * For 32-bit archs with 4 kB pages, this is 56.
201  * This is not serious, as it is only for large objects, when it is unwise
202  * to have too many per slab.
203  * Note: This limit can be raised by introducing a general cache whose size
204  * is less than 512 (PAGE_SIZE>>3), but greater than 256.
205  */
206 
207 typedef unsigned int kmem_bufctl_t;
208 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
209 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
210 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
211 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
212 
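/*
 * Illustrative sketch: the kmem_bufctl_t array sits directly behind the slab
 * descriptor (struct slab, below) and chains free objects by index.  The file
 * provides a helper along these lines further down:
 *
 *	static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 *	{
 *		return (kmem_bufctl_t *)(slabp + 1);
 *	}
 *
 * so slab_bufctl(slabp)[i] holds the index of the next free object after
 * object i, with the chain terminated by BUFCTL_END.
 */
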
213 /*
214  * struct slab
215  *
216  * Manages the objs in a slab. Placed either at the beginning of mem allocated
217  * for a slab, or allocated from a general cache.
218  * Slabs are chained into three lists: fully used, partial, fully free slabs.
219  */
220 struct slab {
221 	struct list_head list;
222 	unsigned long colouroff;
223 	void *s_mem;		/* including colour offset */
224 	unsigned int inuse;	/* num of objs active in slab */
225 	kmem_bufctl_t free;
226 	unsigned short nodeid;
227 };
228 
229 /*
230  * struct slab_rcu
231  *
232  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
233  * arrange for kmem_freepages to be called via RCU.  This is useful if
234  * we need to approach a kernel structure obliquely, from its address
235  * obtained without the usual locking.  We can lock the structure to
236  * stabilize it and check it's still at the given address, only if we
237  * can be sure that the memory has not been meanwhile reused for some
238  * other kind of object (which our subsystem's lock might corrupt).
239  *
240  * rcu_read_lock before reading the address, then rcu_read_unlock after
241  * taking the spinlock within the structure expected at that address.
242  *
243  * We assume struct slab_rcu can overlay struct slab when destroying.
244  */
245 struct slab_rcu {
246 	struct rcu_head head;
247 	struct kmem_cache *cachep;
248 	void *addr;
249 };
250 
251 /*
252  * struct array_cache
253  *
254  * Purpose:
255  * - LIFO ordering, to hand out cache-warm objects from _alloc
256  * - reduce the number of linked list operations
257  * - reduce spinlock operations
258  *
259  * The limit is stored in the per-cpu structure to reduce the data cache
260  * footprint.
261  *
262  */
263 struct array_cache {
264 	unsigned int avail;
265 	unsigned int limit;
266 	unsigned int batchcount;
267 	unsigned int touched;
268 	spinlock_t lock;
269 	void *entry[];	/*
270 			 * Must have this definition in here for the proper
271 			 * alignment of array_cache. Also simplifies accessing
272 			 * the entries.
273 			 */
274 };
275 
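/*
 * Illustrative sketch of how the fast paths use entry[] as a LIFO stack
 * (simplified from the allocation/free code later in this file):
 *
 *	objp = ac->entry[--ac->avail];		allocation hit
 *	ac->entry[ac->avail++] = objp;		free while below the limit
 */
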
276 /*
277  * bootstrap: The caches do not work without cpuarrays anymore, but the
278  * cpuarrays are allocated from the generic caches...
279  */
280 #define BOOT_CPUCACHE_ENTRIES	1
281 struct arraycache_init {
282 	struct array_cache cache;
283 	void *entries[BOOT_CPUCACHE_ENTRIES];
284 };
285 
286 /*
287  * The slab lists for all objects.
288  */
289 struct kmem_list3 {
290 	struct list_head slabs_partial;	/* partial list first, better asm code */
291 	struct list_head slabs_full;
292 	struct list_head slabs_free;
293 	unsigned long free_objects;
294 	unsigned int free_limit;
295 	unsigned int colour_next;	/* Per-node cache coloring */
296 	spinlock_t list_lock;
297 	struct array_cache *shared;	/* shared per node */
298 	struct array_cache **alien;	/* on other nodes */
299 	unsigned long next_reap;	/* updated without locking */
300 	int free_touched;		/* updated without locking */
301 };
302 
303 /*
304  * Need this for bootstrapping a per node allocator.
305  */
306 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
307 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
308 #define	CACHE_CACHE 0
309 #define	SIZE_AC MAX_NUMNODES
310 #define	SIZE_L3 (2 * MAX_NUMNODES)
311 
312 static int drain_freelist(struct kmem_cache *cache,
313 			struct kmem_list3 *l3, int tofree);
314 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
315 			int node);
316 static int enable_cpucache(struct kmem_cache *cachep);
317 static void cache_reap(struct work_struct *unused);
318 
319 /*
320  * This function must be completely optimized away if a constant is passed to
321  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
322  */
323 static __always_inline int index_of(const size_t size)
324 {
325 	extern void __bad_size(void);
326 
327 	if (__builtin_constant_p(size)) {
328 		int i = 0;
329 
330 #define CACHE(x) \
331 	if (size <= x) \
332 		return i; \
333 	else \
334 		i++;
335 #include <linux/kmalloc_sizes.h>
336 #undef CACHE
337 		__bad_size();
338 	} else
339 		__bad_size();
340 	return 0;
341 }
342 
343 static int slab_early_init = 1;
344 
345 #define INDEX_AC index_of(sizeof(struct arraycache_init))
346 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
347 
348 static void kmem_list3_init(struct kmem_list3 *parent)
349 {
350 	INIT_LIST_HEAD(&parent->slabs_full);
351 	INIT_LIST_HEAD(&parent->slabs_partial);
352 	INIT_LIST_HEAD(&parent->slabs_free);
353 	parent->shared = NULL;
354 	parent->alien = NULL;
355 	parent->colour_next = 0;
356 	spin_lock_init(&parent->list_lock);
357 	parent->free_objects = 0;
358 	parent->free_touched = 0;
359 }
360 
361 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
362 	do {								\
363 		INIT_LIST_HEAD(listp);					\
364 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
365 	} while (0)
366 
367 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
368 	do {								\
369 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
370 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
371 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
372 	} while (0)
373 
374 /*
375  * struct kmem_cache
376  *
377  * manages a cache.
378  */
379 
380 struct kmem_cache {
381 /* 1) per-cpu data, touched during every alloc/free */
382 	struct array_cache *array[NR_CPUS];
383 /* 2) Cache tunables. Protected by cache_chain_mutex */
384 	unsigned int batchcount;
385 	unsigned int limit;
386 	unsigned int shared;
387 
388 	unsigned int buffer_size;
389 	u32 reciprocal_buffer_size;
390 /* 3) touched by every alloc & free from the backend */
391 
392 	unsigned int flags;		/* constant flags */
393 	unsigned int num;		/* # of objs per slab */
394 
395 /* 4) cache_grow/shrink */
396 	/* order of pgs per slab (2^n) */
397 	unsigned int gfporder;
398 
399 	/* force GFP flags, e.g. GFP_DMA */
400 	gfp_t gfpflags;
401 
402 	size_t colour;			/* cache colouring range */
403 	unsigned int colour_off;	/* colour offset */
404 	struct kmem_cache *slabp_cache;
405 	unsigned int slab_size;
406 	unsigned int dflags;		/* dynamic flags */
407 
408 	/* constructor func */
409 	void (*ctor)(void *obj);
410 
411 /* 5) cache creation/removal */
412 	const char *name;
413 	struct list_head next;
414 
415 /* 6) statistics */
416 #if STATS
417 	unsigned long num_active;
418 	unsigned long num_allocations;
419 	unsigned long high_mark;
420 	unsigned long grown;
421 	unsigned long reaped;
422 	unsigned long errors;
423 	unsigned long max_freeable;
424 	unsigned long node_allocs;
425 	unsigned long node_frees;
426 	unsigned long node_overflow;
427 	atomic_t allochit;
428 	atomic_t allocmiss;
429 	atomic_t freehit;
430 	atomic_t freemiss;
431 #endif
432 #if DEBUG
433 	/*
434 	 * If debugging is enabled, then the allocator can add additional
435 	 * fields and/or padding to every object. buffer_size contains the total
436 	 * object size including these internal fields; the following two
437 	 * variables contain the offset to the user object and its size.
438 	 */
439 	int obj_offset;
440 	int obj_size;
441 #endif
442 	/*
443 	 * We put nodelists[] at the end of kmem_cache, because we want to size
444 	 * this array to nr_node_ids slots instead of MAX_NUMNODES
445 	 * (see kmem_cache_init())
446 	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
447 	 * is statically defined, so we reserve the max number of nodes.
448 	 */
449 	struct kmem_list3 *nodelists[MAX_NUMNODES];
450 	/*
451 	 * Do not add fields after nodelists[]
452 	 */
453 };
454 
455 #define CFLGS_OFF_SLAB		(0x80000000UL)
456 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
457 
458 #define BATCHREFILL_LIMIT	16
459 /*
460  * Optimization question: fewer reaps mean a lower probability of unnecessary
461  * cpucache drain/refill cycles.
462  *
463  * OTOH the cpuarrays can contain lots of objects,
464  * which could lock up otherwise freeable slabs.
465  */
466 #define REAPTIMEOUT_CPUC	(2*HZ)
467 #define REAPTIMEOUT_LIST3	(4*HZ)
468 
469 #if STATS
470 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
471 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
472 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
473 #define	STATS_INC_GROWN(x)	((x)->grown++)
474 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
475 #define	STATS_SET_HIGH(x)						\
476 	do {								\
477 		if ((x)->num_active > (x)->high_mark)			\
478 			(x)->high_mark = (x)->num_active;		\
479 	} while (0)
480 #define	STATS_INC_ERR(x)	((x)->errors++)
481 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
482 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
483 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
484 #define	STATS_SET_FREEABLE(x, i)					\
485 	do {								\
486 		if ((x)->max_freeable < i)				\
487 			(x)->max_freeable = i;				\
488 	} while (0)
489 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
490 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
491 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
492 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
493 #else
494 #define	STATS_INC_ACTIVE(x)	do { } while (0)
495 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
496 #define	STATS_INC_ALLOCED(x)	do { } while (0)
497 #define	STATS_INC_GROWN(x)	do { } while (0)
498 #define	STATS_ADD_REAPED(x,y)	do { } while (0)
499 #define	STATS_SET_HIGH(x)	do { } while (0)
500 #define	STATS_INC_ERR(x)	do { } while (0)
501 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
502 #define	STATS_INC_NODEFREES(x)	do { } while (0)
503 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
504 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
505 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
506 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
507 #define STATS_INC_FREEHIT(x)	do { } while (0)
508 #define STATS_INC_FREEMISS(x)	do { } while (0)
509 #endif
510 
511 #if DEBUG
512 
513 /*
514  * memory layout of objects:
515  * 0		: objp
516  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
517  * 		the end of an object is aligned with the end of the real
518  * 		allocation. Catches writes behind the end of the allocation.
519  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
520  * 		redzone word.
521  * cachep->obj_offset: The real object.
522  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
523  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
524  *					[BYTES_PER_WORD long]
525  */
526 static int obj_offset(struct kmem_cache *cachep)
527 {
528 	return cachep->obj_offset;
529 }
530 
531 static int obj_size(struct kmem_cache *cachep)
532 {
533 	return cachep->obj_size;
534 }
535 
536 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
537 {
538 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
539 	return (unsigned long long*) (objp + obj_offset(cachep) -
540 				      sizeof(unsigned long long));
541 }
542 
543 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
544 {
545 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
546 	if (cachep->flags & SLAB_STORE_USER)
547 		return (unsigned long long *)(objp + cachep->buffer_size -
548 					      sizeof(unsigned long long) -
549 					      REDZONE_ALIGN);
550 	return (unsigned long long *) (objp + cachep->buffer_size -
551 				       sizeof(unsigned long long));
552 }
553 
554 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
555 {
556 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
557 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
558 }
559 
560 #else
561 
562 #define obj_offset(x)			0
563 #define obj_size(cachep)		(cachep->buffer_size)
564 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
565 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
566 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
567 
568 #endif
569 
570 /*
571  * Do not go above this order unless 0 objects fit into the slab.
572  */
573 #define	BREAK_GFP_ORDER_HI	1
574 #define	BREAK_GFP_ORDER_LO	0
575 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
576 
577 /*
578  * Functions for storing/retrieving the cachep and/or slab from the page
579  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
580  * these are used to find the cache to which an obj belongs.
581  */
582 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
583 {
584 	page->lru.next = (struct list_head *)cache;
585 }
586 
587 static inline struct kmem_cache *page_get_cache(struct page *page)
588 {
589 	page = compound_head(page);
590 	BUG_ON(!PageSlab(page));
591 	return (struct kmem_cache *)page->lru.next;
592 }
593 
594 static inline void page_set_slab(struct page *page, struct slab *slab)
595 {
596 	page->lru.prev = (struct list_head *)slab;
597 }
598 
599 static inline struct slab *page_get_slab(struct page *page)
600 {
601 	BUG_ON(!PageSlab(page));
602 	return (struct slab *)page->lru.prev;
603 }
604 
605 static inline struct kmem_cache *virt_to_cache(const void *obj)
606 {
607 	struct page *page = virt_to_head_page(obj);
608 	return page_get_cache(page);
609 }
610 
611 static inline struct slab *virt_to_slab(const void *obj)
612 {
613 	struct page *page = virt_to_head_page(obj);
614 	return page_get_slab(page);
615 }
616 
617 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
618 				 unsigned int idx)
619 {
620 	return slab->s_mem + cache->buffer_size * idx;
621 }
622 
623 /*
624  * We want to avoid an expensive divide : (offset / cache->buffer_size)
625  *   Using the fact that buffer_size is a constant for a particular cache,
626  *   we can replace (offset / cache->buffer_size) by
627  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
628  */
629 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
630 					const struct slab *slab, void *obj)
631 {
632 	u32 offset = (obj - slab->s_mem);
633 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
634 }
635 
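/*
 * Illustrative example of the reciprocal trick above (assumed numbers): for a
 * cache with buffer_size == 256, setup does
 *
 *	cache->reciprocal_buffer_size = reciprocal_value(256);
 *
 * once, and obj_to_index() then computes e.g.
 *
 *	reciprocal_divide(1536, cache->reciprocal_buffer_size) == 1536 / 256 == 6
 *
 * using only multiplies and shifts instead of a division.
 */
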
636 /*
637  * These are the default caches for kmalloc. Custom caches can have other sizes.
638  */
639 struct cache_sizes malloc_sizes[] = {
640 #define CACHE(x) { .cs_size = (x) },
641 #include <linux/kmalloc_sizes.h>
642 	CACHE(ULONG_MAX)
643 #undef CACHE
644 };
645 EXPORT_SYMBOL(malloc_sizes);
646 
647 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
648 struct cache_names {
649 	char *name;
650 	char *name_dma;
651 };
652 
653 static struct cache_names __initdata cache_names[] = {
654 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
655 #include <linux/kmalloc_sizes.h>
656 	{NULL,}
657 #undef CACHE
658 };
659 
660 static struct arraycache_init initarray_cache __initdata =
661     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
662 static struct arraycache_init initarray_generic =
663     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
664 
665 /* internal cache of cache description objs */
666 static struct kmem_cache cache_cache = {
667 	.batchcount = 1,
668 	.limit = BOOT_CPUCACHE_ENTRIES,
669 	.shared = 1,
670 	.buffer_size = sizeof(struct kmem_cache),
671 	.name = "kmem_cache",
672 };
673 
674 #define BAD_ALIEN_MAGIC 0x01020304ul
675 
676 #ifdef CONFIG_LOCKDEP
677 
678 /*
679  * Slab sometimes uses the kmalloc slabs to store the slab headers
680  * for other slabs "off slab".
681  * The locking for this is tricky in that it nests within the locks
682  * of all other slabs in a few places; to deal with this special
683  * locking we put on-slab caches into a separate lock-class.
684  *
685  * We set lock class for alien array caches which are up during init.
686  * The lock annotation will be lost if all cpus of a node go down and
687  * then come back up during hotplug.
688  */
689 static struct lock_class_key on_slab_l3_key;
690 static struct lock_class_key on_slab_alc_key;
691 
692 static inline void init_lock_keys(void)
693 
694 {
695 	int q;
696 	struct cache_sizes *s = malloc_sizes;
697 
698 	while (s->cs_size != ULONG_MAX) {
699 		for_each_node(q) {
700 			struct array_cache **alc;
701 			int r;
702 			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
703 			if (!l3 || OFF_SLAB(s->cs_cachep))
704 				continue;
705 			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
706 			alc = l3->alien;
707 			/*
708 			 * FIXME: This check for BAD_ALIEN_MAGIC
709 			 * should go away when common slab code is taught to
710 			 * work even without alien caches.
711 			 * Currently, non-NUMA code returns BAD_ALIEN_MAGIC
712 			 * for alloc_alien_cache.
713 			 */
714 			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
715 				continue;
716 			for_each_node(r) {
717 				if (alc[r])
718 					lockdep_set_class(&alc[r]->lock,
719 					     &on_slab_alc_key);
720 			}
721 		}
722 		s++;
723 	}
724 }
725 #else
726 static inline void init_lock_keys(void)
727 {
728 }
729 #endif
730 
731 /*
732  * Guard access to the cache-chain.
733  */
734 static DEFINE_MUTEX(cache_chain_mutex);
735 static struct list_head cache_chain;
736 
737 /*
738  * chicken and egg problem: delay the per-cpu array allocation
739  * until the general caches are up.
740  */
741 static enum {
742 	NONE,
743 	PARTIAL_AC,
744 	PARTIAL_L3,
745 	FULL
746 } g_cpucache_up;
747 
748 /*
749  * used by boot code to determine if it can use slab based allocator
750  */
751 int slab_is_available(void)
752 {
753 	return g_cpucache_up == FULL;
754 }
755 
756 static DEFINE_PER_CPU(struct delayed_work, reap_work);
757 
758 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
759 {
760 	return cachep->array[smp_processor_id()];
761 }
762 
763 static inline struct kmem_cache *__find_general_cachep(size_t size,
764 							gfp_t gfpflags)
765 {
766 	struct cache_sizes *csizep = malloc_sizes;
767 
768 #if DEBUG
769 	/* This happens if someone tries to call
770 	 * kmem_cache_create(), or __kmalloc(), before
771 	 * the generic caches are initialized.
772 	 */
773 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
774 #endif
775 	if (!size)
776 		return ZERO_SIZE_PTR;
777 
778 	while (size > csizep->cs_size)
779 		csizep++;
780 
781 	/*
782 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
783 	 * has cs_{dma,}cachep==NULL. Thus no special case
784 	 * for large kmalloc calls is required.
785 	 */
786 #ifdef CONFIG_ZONE_DMA
787 	if (unlikely(gfpflags & GFP_DMA))
788 		return csizep->cs_dmacachep;
789 #endif
790 	return csizep->cs_cachep;
791 }
792 
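/*
 * Illustrative example (assuming a typical kmalloc_sizes.h table): a request
 * such as kmalloc(100, GFP_KERNEL) walks malloc_sizes[] until cs_size >= 100
 * and so uses the "size-128" cache; the same request with GFP_DMA would use
 * the matching cs_dmacachep ("size-128(DMA)") when CONFIG_ZONE_DMA is set.
 */
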
793 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
794 {
795 	return __find_general_cachep(size, gfpflags);
796 }
797 
798 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
799 {
800 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
801 }
802 
803 /*
804  * Calculate the number of objects and left-over bytes for a given buffer size.
805  */
806 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
807 			   size_t align, int flags, size_t *left_over,
808 			   unsigned int *num)
809 {
810 	int nr_objs;
811 	size_t mgmt_size;
812 	size_t slab_size = PAGE_SIZE << gfporder;
813 
814 	/*
815 	 * The slab management structure can be either off the slab or
816 	 * on it. For the latter case, the memory allocated for a
817 	 * slab is used for:
818 	 *
819 	 * - The struct slab
820 	 * - One kmem_bufctl_t for each object
821 	 * - Padding to respect alignment of @align
822 	 * - @buffer_size bytes for each object
823 	 *
824 	 * If the slab management structure is off the slab, then the
825 	 * alignment will already be calculated into the size. Because
826 	 * the slabs are all pages aligned, the objects will be at the
827 	 * correct alignment when allocated.
828 	 */
829 	if (flags & CFLGS_OFF_SLAB) {
830 		mgmt_size = 0;
831 		nr_objs = slab_size / buffer_size;
832 
833 		if (nr_objs > SLAB_LIMIT)
834 			nr_objs = SLAB_LIMIT;
835 	} else {
836 		/*
837 		 * Ignore padding for the initial guess. The padding
838 		 * is at most @align-1 bytes, and @buffer_size is at
839 		 * least @align. In the worst case, this result will
840 		 * be one greater than the number of objects that fit
841 		 * into the memory allocation when taking the padding
842 		 * into account.
843 		 */
844 		nr_objs = (slab_size - sizeof(struct slab)) /
845 			  (buffer_size + sizeof(kmem_bufctl_t));
846 
847 		/*
848 		 * This calculated number will be either the right
849 		 * amount, or one greater than what we want.
850 		 */
851 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
852 		       > slab_size)
853 			nr_objs--;
854 
855 		if (nr_objs > SLAB_LIMIT)
856 			nr_objs = SLAB_LIMIT;
857 
858 		mgmt_size = slab_mgmt_size(nr_objs, align);
859 	}
860 	*num = nr_objs;
861 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
862 }
863 
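/*
 * Worked example for cache_estimate() (assumed numbers): with gfporder == 0
 * (4 kB slab), buffer_size == 256, on-slab management and a 4-byte
 * kmem_bufctl_t, the first guess is
 *
 *	nr_objs = (4096 - sizeof(struct slab)) / (256 + 4)
 *
 * i.e. about 15 objects; if slab_mgmt_size(nr_objs, align) + nr_objs*256 no
 * longer fits in 4096 the count is reduced by one, and *left_over receives
 * the bytes remaining for cache colouring.
 */
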
864 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
865 
866 static void __slab_error(const char *function, struct kmem_cache *cachep,
867 			char *msg)
868 {
869 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
870 	       function, cachep->name, msg);
871 	dump_stack();
872 }
873 
874 /*
875  * By default on NUMA we use alien caches to stage the freeing of
876  * objects allocated from other nodes. This causes massive memory
877  * inefficiencies when using a fake NUMA setup to split memory into a
878  * large number of small nodes, so it can be disabled on the command
879  * line.
880  */
881 
882 static int use_alien_caches __read_mostly = 1;
883 static int numa_platform __read_mostly = 1;
884 static int __init noaliencache_setup(char *s)
885 {
886 	use_alien_caches = 0;
887 	return 1;
888 }
889 __setup("noaliencache", noaliencache_setup);
890 
891 #ifdef CONFIG_NUMA
892 /*
893  * Special reaping functions for NUMA systems called from cache_reap().
894  * These take care of doing round robin flushing of alien caches (containing
895  * objects freed on a different node from the one they were allocated on) and the
896  * flushing of remote pcps by calling drain_node_pages.
897  */
898 static DEFINE_PER_CPU(unsigned long, reap_node);
899 
900 static void init_reap_node(int cpu)
901 {
902 	int node;
903 
904 	node = next_node(cpu_to_node(cpu), node_online_map);
905 	if (node == MAX_NUMNODES)
906 		node = first_node(node_online_map);
907 
908 	per_cpu(reap_node, cpu) = node;
909 }
910 
911 static void next_reap_node(void)
912 {
913 	int node = __get_cpu_var(reap_node);
914 
915 	node = next_node(node, node_online_map);
916 	if (unlikely(node >= MAX_NUMNODES))
917 		node = first_node(node_online_map);
918 	__get_cpu_var(reap_node) = node;
919 }
920 
921 #else
922 #define init_reap_node(cpu) do { } while (0)
923 #define next_reap_node(void) do { } while (0)
924 #endif
925 
926 /*
927  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
928  * via the workqueue/eventd.
929  * Add the CPU number into the expiration time to minimize the possibility of
930  * the CPUs getting into lockstep and contending for the global cache chain
931  * lock.
932  */
933 static void __cpuinit start_cpu_timer(int cpu)
934 {
935 	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
936 
937 	/*
938 	 * When this gets called from do_initcalls via cpucache_init(),
939 	 * init_workqueues() has already run, so keventd will be set up
940 	 * at that time.
941 	 */
942 	if (keventd_up() && reap_work->work.func == NULL) {
943 		init_reap_node(cpu);
944 		INIT_DELAYED_WORK(reap_work, cache_reap);
945 		schedule_delayed_work_on(cpu, reap_work,
946 					__round_jiffies_relative(HZ, cpu));
947 	}
948 }
949 
950 static struct array_cache *alloc_arraycache(int node, int entries,
951 					    int batchcount)
952 {
953 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
954 	struct array_cache *nc = NULL;
955 
956 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
957 	if (nc) {
958 		nc->avail = 0;
959 		nc->limit = entries;
960 		nc->batchcount = batchcount;
961 		nc->touched = 0;
962 		spin_lock_init(&nc->lock);
963 	}
964 	return nc;
965 }
966 
967 /*
968  * Transfer objects in one arraycache to another.
969  * Locking must be handled by the caller.
970  *
971  * Return the number of entries transferred.
972  */
973 static int transfer_objects(struct array_cache *to,
974 		struct array_cache *from, unsigned int max)
975 {
976 	/* Figure out how many entries to transfer */
977 	int nr = min(min(from->avail, max), to->limit - to->avail);
978 
979 	if (!nr)
980 		return 0;
981 
982 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
983 			sizeof(void *) *nr);
984 
985 	from->avail -= nr;
986 	to->avail += nr;
987 	to->touched = 1;
988 	return nr;
989 }
990 
991 #ifndef CONFIG_NUMA
992 
993 #define drain_alien_cache(cachep, alien) do { } while (0)
994 #define reap_alien(cachep, l3) do { } while (0)
995 
996 static inline struct array_cache **alloc_alien_cache(int node, int limit)
997 {
998 	return (struct array_cache **)BAD_ALIEN_MAGIC;
999 }
1000 
1001 static inline void free_alien_cache(struct array_cache **ac_ptr)
1002 {
1003 }
1004 
1005 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1006 {
1007 	return 0;
1008 }
1009 
1010 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1011 		gfp_t flags)
1012 {
1013 	return NULL;
1014 }
1015 
1016 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1017 		 gfp_t flags, int nodeid)
1018 {
1019 	return NULL;
1020 }
1021 
1022 #else	/* CONFIG_NUMA */
1023 
1024 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1025 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1026 
1027 static struct array_cache **alloc_alien_cache(int node, int limit)
1028 {
1029 	struct array_cache **ac_ptr;
1030 	int memsize = sizeof(void *) * nr_node_ids;
1031 	int i;
1032 
1033 	if (limit > 1)
1034 		limit = 12;
1035 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1036 	if (ac_ptr) {
1037 		for_each_node(i) {
1038 			if (i == node || !node_online(i)) {
1039 				ac_ptr[i] = NULL;
1040 				continue;
1041 			}
1042 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1043 			if (!ac_ptr[i]) {
1044 				for (i--; i >= 0; i--)
1045 					kfree(ac_ptr[i]);
1046 				kfree(ac_ptr);
1047 				return NULL;
1048 			}
1049 		}
1050 	}
1051 	return ac_ptr;
1052 }
1053 
1054 static void free_alien_cache(struct array_cache **ac_ptr)
1055 {
1056 	int i;
1057 
1058 	if (!ac_ptr)
1059 		return;
1060 	for_each_node(i)
1061 	    kfree(ac_ptr[i]);
1062 	kfree(ac_ptr);
1063 }
1064 
1065 static void __drain_alien_cache(struct kmem_cache *cachep,
1066 				struct array_cache *ac, int node)
1067 {
1068 	struct kmem_list3 *rl3 = cachep->nodelists[node];
1069 
1070 	if (ac->avail) {
1071 		spin_lock(&rl3->list_lock);
1072 		/*
1073 		 * Stuff objects into the remote node's shared array first.
1074 		 * That way we can avoid the overhead of putting the objects
1075 		 * into the free lists and getting them back later.
1076 		 */
1077 		if (rl3->shared)
1078 			transfer_objects(rl3->shared, ac, ac->limit);
1079 
1080 		free_block(cachep, ac->entry, ac->avail, node);
1081 		ac->avail = 0;
1082 		spin_unlock(&rl3->list_lock);
1083 	}
1084 }
1085 
1086 /*
1087  * Called from cache_reap() to regularly drain alien caches round robin.
1088  */
1089 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1090 {
1091 	int node = __get_cpu_var(reap_node);
1092 
1093 	if (l3->alien) {
1094 		struct array_cache *ac = l3->alien[node];
1095 
1096 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1097 			__drain_alien_cache(cachep, ac, node);
1098 			spin_unlock_irq(&ac->lock);
1099 		}
1100 	}
1101 }
1102 
1103 static void drain_alien_cache(struct kmem_cache *cachep,
1104 				struct array_cache **alien)
1105 {
1106 	int i = 0;
1107 	struct array_cache *ac;
1108 	unsigned long flags;
1109 
1110 	for_each_online_node(i) {
1111 		ac = alien[i];
1112 		if (ac) {
1113 			spin_lock_irqsave(&ac->lock, flags);
1114 			__drain_alien_cache(cachep, ac, i);
1115 			spin_unlock_irqrestore(&ac->lock, flags);
1116 		}
1117 	}
1118 }
1119 
1120 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1121 {
1122 	struct slab *slabp = virt_to_slab(objp);
1123 	int nodeid = slabp->nodeid;
1124 	struct kmem_list3 *l3;
1125 	struct array_cache *alien = NULL;
1126 	int node;
1127 
1128 	node = numa_node_id();
1129 
1130 	/*
1131 	 * Make sure we are not freeing an object from another node to the array
1132 	 * cache on this cpu.
1133 	 */
1134 	if (likely(slabp->nodeid == node))
1135 		return 0;
1136 
1137 	l3 = cachep->nodelists[node];
1138 	STATS_INC_NODEFREES(cachep);
1139 	if (l3->alien && l3->alien[nodeid]) {
1140 		alien = l3->alien[nodeid];
1141 		spin_lock(&alien->lock);
1142 		if (unlikely(alien->avail == alien->limit)) {
1143 			STATS_INC_ACOVERFLOW(cachep);
1144 			__drain_alien_cache(cachep, alien, nodeid);
1145 		}
1146 		alien->entry[alien->avail++] = objp;
1147 		spin_unlock(&alien->lock);
1148 	} else {
1149 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1150 		free_block(cachep, &objp, 1, nodeid);
1151 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1152 	}
1153 	return 1;
1154 }
1155 #endif
1156 
1157 static void __cpuinit cpuup_canceled(long cpu)
1158 {
1159 	struct kmem_cache *cachep;
1160 	struct kmem_list3 *l3 = NULL;
1161 	int node = cpu_to_node(cpu);
1162 	node_to_cpumask_ptr(mask, node);
1163 
1164 	list_for_each_entry(cachep, &cache_chain, next) {
1165 		struct array_cache *nc;
1166 		struct array_cache *shared;
1167 		struct array_cache **alien;
1168 
1169 		/* cpu is dead; no one can alloc from it. */
1170 		nc = cachep->array[cpu];
1171 		cachep->array[cpu] = NULL;
1172 		l3 = cachep->nodelists[node];
1173 
1174 		if (!l3)
1175 			goto free_array_cache;
1176 
1177 		spin_lock_irq(&l3->list_lock);
1178 
1179 		/* Free limit for this kmem_list3 */
1180 		l3->free_limit -= cachep->batchcount;
1181 		if (nc)
1182 			free_block(cachep, nc->entry, nc->avail, node);
1183 
1184 		if (!cpus_empty(*mask)) {
1185 			spin_unlock_irq(&l3->list_lock);
1186 			goto free_array_cache;
1187 		}
1188 
1189 		shared = l3->shared;
1190 		if (shared) {
1191 			free_block(cachep, shared->entry,
1192 				   shared->avail, node);
1193 			l3->shared = NULL;
1194 		}
1195 
1196 		alien = l3->alien;
1197 		l3->alien = NULL;
1198 
1199 		spin_unlock_irq(&l3->list_lock);
1200 
1201 		kfree(shared);
1202 		if (alien) {
1203 			drain_alien_cache(cachep, alien);
1204 			free_alien_cache(alien);
1205 		}
1206 free_array_cache:
1207 		kfree(nc);
1208 	}
1209 	/*
1210 	 * In the previous loop, all the objects were freed to
1211 	 * the respective cache's slabs; now we can go ahead and
1212 	 * shrink each nodelist to its limit.
1213 	 */
1214 	list_for_each_entry(cachep, &cache_chain, next) {
1215 		l3 = cachep->nodelists[node];
1216 		if (!l3)
1217 			continue;
1218 		drain_freelist(cachep, l3, l3->free_objects);
1219 	}
1220 }
1221 
1222 static int __cpuinit cpuup_prepare(long cpu)
1223 {
1224 	struct kmem_cache *cachep;
1225 	struct kmem_list3 *l3 = NULL;
1226 	int node = cpu_to_node(cpu);
1227 	const int memsize = sizeof(struct kmem_list3);
1228 
1229 	/*
1230 	 * We need to do this right at the beginning since
1231 	 * the alloc_arraycache() calls are going to use this list.
1232 	 * kmalloc_node allows us to add the slab to the right
1233 	 * kmem_list3 and not to this cpu's kmem_list3.
1234 	 */
1235 
1236 	list_for_each_entry(cachep, &cache_chain, next) {
1237 		/*
1238 		 * Set up the kmem_list3 for this cpu's node before we can
1239 		 * begin anything. Make sure some other cpu on this
1240 		 * node has not already allocated it.
1241 		 */
1242 		if (!cachep->nodelists[node]) {
1243 			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1244 			if (!l3)
1245 				goto bad;
1246 			kmem_list3_init(l3);
1247 			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1248 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1249 
1250 			/*
1251 			 * The l3s don't come and go as CPUs come and
1252 			 * go.  cache_chain_mutex is sufficient
1253 			 * protection here.
1254 			 */
1255 			cachep->nodelists[node] = l3;
1256 		}
1257 
1258 		spin_lock_irq(&cachep->nodelists[node]->list_lock);
1259 		cachep->nodelists[node]->free_limit =
1260 			(1 + nr_cpus_node(node)) *
1261 			cachep->batchcount + cachep->num;
1262 		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1263 	}
1264 
1265 	/*
1266 	 * Now we can go ahead with allocating the shared arrays and
1267 	 * array caches
1268 	 */
1269 	list_for_each_entry(cachep, &cache_chain, next) {
1270 		struct array_cache *nc;
1271 		struct array_cache *shared = NULL;
1272 		struct array_cache **alien = NULL;
1273 
1274 		nc = alloc_arraycache(node, cachep->limit,
1275 					cachep->batchcount);
1276 		if (!nc)
1277 			goto bad;
1278 		if (cachep->shared) {
1279 			shared = alloc_arraycache(node,
1280 				cachep->shared * cachep->batchcount,
1281 				0xbaadf00d);
1282 			if (!shared) {
1283 				kfree(nc);
1284 				goto bad;
1285 			}
1286 		}
1287 		if (use_alien_caches) {
1288 			alien = alloc_alien_cache(node, cachep->limit);
1289 			if (!alien) {
1290 				kfree(shared);
1291 				kfree(nc);
1292 				goto bad;
1293 			}
1294 		}
1295 		cachep->array[cpu] = nc;
1296 		l3 = cachep->nodelists[node];
1297 		BUG_ON(!l3);
1298 
1299 		spin_lock_irq(&l3->list_lock);
1300 		if (!l3->shared) {
1301 			/*
1302 			 * We are serialised from CPU_DEAD or
1303 			 * CPU_UP_CANCELLED by the cpucontrol lock
1304 			 */
1305 			l3->shared = shared;
1306 			shared = NULL;
1307 		}
1308 #ifdef CONFIG_NUMA
1309 		if (!l3->alien) {
1310 			l3->alien = alien;
1311 			alien = NULL;
1312 		}
1313 #endif
1314 		spin_unlock_irq(&l3->list_lock);
1315 		kfree(shared);
1316 		free_alien_cache(alien);
1317 	}
1318 	return 0;
1319 bad:
1320 	cpuup_canceled(cpu);
1321 	return -ENOMEM;
1322 }
1323 
1324 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1325 				    unsigned long action, void *hcpu)
1326 {
1327 	long cpu = (long)hcpu;
1328 	int err = 0;
1329 
1330 	switch (action) {
1331 	case CPU_UP_PREPARE:
1332 	case CPU_UP_PREPARE_FROZEN:
1333 		mutex_lock(&cache_chain_mutex);
1334 		err = cpuup_prepare(cpu);
1335 		mutex_unlock(&cache_chain_mutex);
1336 		break;
1337 	case CPU_ONLINE:
1338 	case CPU_ONLINE_FROZEN:
1339 		start_cpu_timer(cpu);
1340 		break;
1341 #ifdef CONFIG_HOTPLUG_CPU
1342   	case CPU_DOWN_PREPARE:
1343   	case CPU_DOWN_PREPARE_FROZEN:
1344 		/*
1345 		 * Shut down the cache reaper. Note that the cache_chain_mutex is
1346 		 * held so that if cache_reap() is invoked it cannot do
1347 		 * anything expensive but will only modify reap_work
1348 		 * and reschedule the timer.
1349 		 */
1350 		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1351 		/* Now the cache_reaper is guaranteed to be not running. */
1352 		per_cpu(reap_work, cpu).work.func = NULL;
1353   		break;
1354   	case CPU_DOWN_FAILED:
1355   	case CPU_DOWN_FAILED_FROZEN:
1356 		start_cpu_timer(cpu);
1357   		break;
1358 	case CPU_DEAD:
1359 	case CPU_DEAD_FROZEN:
1360 		/*
1361 		 * Even if all the cpus of a node are down, we don't free the
1362 		 * kmem_list3 of any cache. This is to avoid a race between
1363 		 * cpu_down, and a kmalloc allocation from another cpu for
1364 		 * memory from the node of the cpu going down.  The list3
1365 		 * structure is usually allocated from kmem_cache_create() and
1366 		 * gets destroyed at kmem_cache_destroy().
1367 		 */
1368 		/* fall through */
1369 #endif
1370 	case CPU_UP_CANCELED:
1371 	case CPU_UP_CANCELED_FROZEN:
1372 		mutex_lock(&cache_chain_mutex);
1373 		cpuup_canceled(cpu);
1374 		mutex_unlock(&cache_chain_mutex);
1375 		break;
1376 	}
1377 	return err ? NOTIFY_BAD : NOTIFY_OK;
1378 }
1379 
1380 static struct notifier_block __cpuinitdata cpucache_notifier = {
1381 	&cpuup_callback, NULL, 0
1382 };
1383 
1384 /*
1385  * swap the static kmem_list3 with kmalloced memory
1386  */
1387 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1388 			int nodeid)
1389 {
1390 	struct kmem_list3 *ptr;
1391 
1392 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1393 	BUG_ON(!ptr);
1394 
1395 	local_irq_disable();
1396 	memcpy(ptr, list, sizeof(struct kmem_list3));
1397 	/*
1398 	 * Do not assume that spinlocks can be initialized via memcpy:
1399 	 */
1400 	spin_lock_init(&ptr->list_lock);
1401 
1402 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1403 	cachep->nodelists[nodeid] = ptr;
1404 	local_irq_enable();
1405 }
1406 
1407 /*
1408  * For setting up all the kmem_list3s for a cache whose buffer_size is the
1409  * same as the size of struct kmem_list3.
1410  */
1411 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1412 {
1413 	int node;
1414 
1415 	for_each_online_node(node) {
1416 		cachep->nodelists[node] = &initkmem_list3[index + node];
1417 		cachep->nodelists[node]->next_reap = jiffies +
1418 		    REAPTIMEOUT_LIST3 +
1419 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1420 	}
1421 }
1422 
1423 /*
1424  * Initialisation.  Called after the page allocator has been initialised and
1425  * before smp_init().
1426  */
1427 void __init kmem_cache_init(void)
1428 {
1429 	size_t left_over;
1430 	struct cache_sizes *sizes;
1431 	struct cache_names *names;
1432 	int i;
1433 	int order;
1434 	int node;
1435 
1436 	if (num_possible_nodes() == 1) {
1437 		use_alien_caches = 0;
1438 		numa_platform = 0;
1439 	}
1440 
1441 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1442 		kmem_list3_init(&initkmem_list3[i]);
1443 		if (i < MAX_NUMNODES)
1444 			cache_cache.nodelists[i] = NULL;
1445 	}
1446 	set_up_list3s(&cache_cache, CACHE_CACHE);
1447 
1448 	/*
1449 	 * Fragmentation resistance on low memory - only use bigger
1450 	 * page orders on machines with more than 32MB of memory.
1451 	 */
1452 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1453 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1454 
1455 	/* Bootstrap is tricky, because several objects are allocated
1456 	 * from caches that do not exist yet:
1457 	 * 1) initialize the cache_cache cache: it contains the struct
1458 	 *    kmem_cache structures of all caches, except cache_cache itself:
1459 	 *    cache_cache is statically allocated.
1460 	 *    Initially an __init data area is used for the head array and the
1461 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1462 	 *    array at the end of the bootstrap.
1463 	 * 2) Create the first kmalloc cache.
1464 	 *    The struct kmem_cache for the new cache is allocated normally.
1465 	 *    An __init data area is used for the head array.
1466 	 * 3) Create the remaining kmalloc caches, with minimally sized
1467 	 *    head arrays.
1468 	 * 4) Replace the __init data head arrays for cache_cache and the first
1469 	 *    kmalloc cache with kmalloc allocated arrays.
1470 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1471 	 *    the other caches with kmalloc allocated memory.
1472 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1473 	 */
1474 
1475 	node = numa_node_id();
1476 
1477 	/* 1) create the cache_cache */
1478 	INIT_LIST_HEAD(&cache_chain);
1479 	list_add(&cache_cache.next, &cache_chain);
1480 	cache_cache.colour_off = cache_line_size();
1481 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1482 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1483 
1484 	/*
1485 	 * struct kmem_cache size depends on nr_node_ids, which
1486 	 * can be less than MAX_NUMNODES.
1487 	 */
1488 	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1489 				 nr_node_ids * sizeof(struct kmem_list3 *);
1490 #if DEBUG
1491 	cache_cache.obj_size = cache_cache.buffer_size;
1492 #endif
1493 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1494 					cache_line_size());
1495 	cache_cache.reciprocal_buffer_size =
1496 		reciprocal_value(cache_cache.buffer_size);
1497 
1498 	for (order = 0; order < MAX_ORDER; order++) {
1499 		cache_estimate(order, cache_cache.buffer_size,
1500 			cache_line_size(), 0, &left_over, &cache_cache.num);
1501 		if (cache_cache.num)
1502 			break;
1503 	}
1504 	BUG_ON(!cache_cache.num);
1505 	cache_cache.gfporder = order;
1506 	cache_cache.colour = left_over / cache_cache.colour_off;
1507 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1508 				      sizeof(struct slab), cache_line_size());
1509 
1510 	/* 2+3) create the kmalloc caches */
1511 	sizes = malloc_sizes;
1512 	names = cache_names;
1513 
1514 	/*
1515 	 * Initialize the caches that provide memory for the array cache and the
1516 	 * kmem_list3 structures first.  Without this, further allocations will
1517 	 * BUG().
1518 	 */
1519 
1520 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1521 					sizes[INDEX_AC].cs_size,
1522 					ARCH_KMALLOC_MINALIGN,
1523 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1524 					NULL);
1525 
1526 	if (INDEX_AC != INDEX_L3) {
1527 		sizes[INDEX_L3].cs_cachep =
1528 			kmem_cache_create(names[INDEX_L3].name,
1529 				sizes[INDEX_L3].cs_size,
1530 				ARCH_KMALLOC_MINALIGN,
1531 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1532 				NULL);
1533 	}
1534 
1535 	slab_early_init = 0;
1536 
1537 	while (sizes->cs_size != ULONG_MAX) {
1538 		/*
1539 		 * For performance, all the general caches are L1 aligned.
1540 		 * This should be particularly beneficial on SMP boxes, as it
1541 		 * eliminates "false sharing".
1542 		 * Note that for systems short on memory, removing the alignment will
1543 		 * allow tighter packing of the smaller caches.
1544 		 */
1545 		if (!sizes->cs_cachep) {
1546 			sizes->cs_cachep = kmem_cache_create(names->name,
1547 					sizes->cs_size,
1548 					ARCH_KMALLOC_MINALIGN,
1549 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1550 					NULL);
1551 		}
1552 #ifdef CONFIG_ZONE_DMA
1553 		sizes->cs_dmacachep = kmem_cache_create(
1554 					names->name_dma,
1555 					sizes->cs_size,
1556 					ARCH_KMALLOC_MINALIGN,
1557 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1558 						SLAB_PANIC,
1559 					NULL);
1560 #endif
1561 		sizes++;
1562 		names++;
1563 	}
1564 	/* 4) Replace the bootstrap head arrays */
1565 	{
1566 		struct array_cache *ptr;
1567 
1568 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1569 
1570 		local_irq_disable();
1571 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1572 		memcpy(ptr, cpu_cache_get(&cache_cache),
1573 		       sizeof(struct arraycache_init));
1574 		/*
1575 		 * Do not assume that spinlocks can be initialized via memcpy:
1576 		 */
1577 		spin_lock_init(&ptr->lock);
1578 
1579 		cache_cache.array[smp_processor_id()] = ptr;
1580 		local_irq_enable();
1581 
1582 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1583 
1584 		local_irq_disable();
1585 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1586 		       != &initarray_generic.cache);
1587 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1588 		       sizeof(struct arraycache_init));
1589 		/*
1590 		 * Do not assume that spinlocks can be initialized via memcpy:
1591 		 */
1592 		spin_lock_init(&ptr->lock);
1593 
1594 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1595 		    ptr;
1596 		local_irq_enable();
1597 	}
1598 	/* 5) Replace the bootstrap kmem_list3's */
1599 	{
1600 		int nid;
1601 
1602 		for_each_online_node(nid) {
1603 			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1604 
1605 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1606 				  &initkmem_list3[SIZE_AC + nid], nid);
1607 
1608 			if (INDEX_AC != INDEX_L3) {
1609 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1610 					  &initkmem_list3[SIZE_L3 + nid], nid);
1611 			}
1612 		}
1613 	}
1614 
1615 	/* 6) resize the head arrays to their final sizes */
1616 	{
1617 		struct kmem_cache *cachep;
1618 		mutex_lock(&cache_chain_mutex);
1619 		list_for_each_entry(cachep, &cache_chain, next)
1620 			if (enable_cpucache(cachep))
1621 				BUG();
1622 		mutex_unlock(&cache_chain_mutex);
1623 	}
1624 
1625 	/* Annotate slab for lockdep -- annotate the malloc caches */
1626 	init_lock_keys();
1627 
1628 
1629 	/* Done! */
1630 	g_cpucache_up = FULL;
1631 
1632 	/*
1633 	 * Register a cpu startup notifier callback that initializes
1634 	 * cpu_cache_get for all new cpus
1635 	 */
1636 	register_cpu_notifier(&cpucache_notifier);
1637 
1638 	/*
1639 	 * The reap timers are started later, with a module init call: That part
1640 	 * of the kernel is not yet operational.
1641 	 */
1642 }
1643 
1644 static int __init cpucache_init(void)
1645 {
1646 	int cpu;
1647 
1648 	/*
1649 	 * Register the timers that return unneeded pages to the page allocator
1650 	 */
1651 	for_each_online_cpu(cpu)
1652 		start_cpu_timer(cpu);
1653 	return 0;
1654 }
1655 __initcall(cpucache_init);
1656 
1657 /*
1658  * Interface to system's page allocator. No need to hold the cache-lock.
1659  *
1660  * If we requested dmaable memory, we will get it. Even if we
1661  * If we requested DMA-able memory, we will get it. Even if we
1662  * did not request DMA-able memory, we might get it, but that
1663  */
1664 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1665 {
1666 	struct page *page;
1667 	int nr_pages;
1668 	int i;
1669 
1670 #ifndef CONFIG_MMU
1671 	/*
1672 	 * Nommu uses slabs for process anonymous memory allocations, and thus
1673 	 * requires __GFP_COMP to properly refcount higher order allocations.
1674 	 */
1675 	flags |= __GFP_COMP;
1676 #endif
1677 
1678 	flags |= cachep->gfpflags;
1679 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1680 		flags |= __GFP_RECLAIMABLE;
1681 
1682 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1683 	if (!page)
1684 		return NULL;
1685 
1686 	nr_pages = (1 << cachep->gfporder);
1687 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1688 		add_zone_page_state(page_zone(page),
1689 			NR_SLAB_RECLAIMABLE, nr_pages);
1690 	else
1691 		add_zone_page_state(page_zone(page),
1692 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1693 	for (i = 0; i < nr_pages; i++)
1694 		__SetPageSlab(page + i);
1695 	return page_address(page);
1696 }
1697 
1698 /*
1699  * Interface to system's page release.
1700  */
1701 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1702 {
1703 	unsigned long i = (1 << cachep->gfporder);
1704 	struct page *page = virt_to_page(addr);
1705 	const unsigned long nr_freed = i;
1706 
1707 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1708 		sub_zone_page_state(page_zone(page),
1709 				NR_SLAB_RECLAIMABLE, nr_freed);
1710 	else
1711 		sub_zone_page_state(page_zone(page),
1712 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1713 	while (i--) {
1714 		BUG_ON(!PageSlab(page));
1715 		__ClearPageSlab(page);
1716 		page++;
1717 	}
1718 	if (current->reclaim_state)
1719 		current->reclaim_state->reclaimed_slab += nr_freed;
1720 	free_pages((unsigned long)addr, cachep->gfporder);
1721 }
1722 
1723 static void kmem_rcu_free(struct rcu_head *head)
1724 {
1725 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1726 	struct kmem_cache *cachep = slab_rcu->cachep;
1727 
1728 	kmem_freepages(cachep, slab_rcu->addr);
1729 	if (OFF_SLAB(cachep))
1730 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1731 }
1732 
1733 #if DEBUG
1734 
1735 #ifdef CONFIG_DEBUG_PAGEALLOC
1736 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1737 			    unsigned long caller)
1738 {
1739 	int size = obj_size(cachep);
1740 
1741 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1742 
1743 	if (size < 5 * sizeof(unsigned long))
1744 		return;
1745 
1746 	*addr++ = 0x12345678;
1747 	*addr++ = caller;
1748 	*addr++ = smp_processor_id();
1749 	size -= 3 * sizeof(unsigned long);
1750 	{
1751 		unsigned long *sptr = &caller;
1752 		unsigned long svalue;
1753 
1754 		while (!kstack_end(sptr)) {
1755 			svalue = *sptr++;
1756 			if (kernel_text_address(svalue)) {
1757 				*addr++ = svalue;
1758 				size -= sizeof(unsigned long);
1759 				if (size <= sizeof(unsigned long))
1760 					break;
1761 			}
1762 		}
1763 
1764 	}
1765 	*addr++ = 0x87654321;
1766 }
1767 #endif
1768 
1769 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1770 {
1771 	int size = obj_size(cachep);
1772 	addr = &((char *)addr)[obj_offset(cachep)];
1773 
1774 	memset(addr, val, size);
1775 	*(unsigned char *)(addr + size - 1) = POISON_END;
1776 }
1777 
1778 static void dump_line(char *data, int offset, int limit)
1779 {
1780 	int i;
1781 	unsigned char error = 0;
1782 	int bad_count = 0;
1783 
1784 	printk(KERN_ERR "%03x:", offset);
1785 	for (i = 0; i < limit; i++) {
1786 		if (data[offset + i] != POISON_FREE) {
1787 			error = data[offset + i];
1788 			bad_count++;
1789 		}
1790 		printk(" %02x", (unsigned char)data[offset + i]);
1791 	}
1792 	printk("\n");
1793 
1794 	if (bad_count == 1) {
1795 		error ^= POISON_FREE;
1796 		if (!(error & (error - 1))) {
1797 			printk(KERN_ERR "Single bit error detected. Probably "
1798 					"bad RAM.\n");
1799 #ifdef CONFIG_X86
1800 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1801 					"test tool.\n");
1802 #else
1803 			printk(KERN_ERR "Run a memory test tool.\n");
1804 #endif
1805 		}
1806 	}
1807 }
1808 #endif
1809 
1810 #if DEBUG
1811 
1812 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1813 {
1814 	int i, size;
1815 	char *realobj;
1816 
1817 	if (cachep->flags & SLAB_RED_ZONE) {
1818 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1819 			*dbg_redzone1(cachep, objp),
1820 			*dbg_redzone2(cachep, objp));
1821 	}
1822 
1823 	if (cachep->flags & SLAB_STORE_USER) {
1824 		printk(KERN_ERR "Last user: [<%p>]",
1825 			*dbg_userword(cachep, objp));
1826 		print_symbol("(%s)",
1827 				(unsigned long)*dbg_userword(cachep, objp));
1828 		printk("\n");
1829 	}
1830 	realobj = (char *)objp + obj_offset(cachep);
1831 	size = obj_size(cachep);
1832 	for (i = 0; i < size && lines; i += 16, lines--) {
1833 		int limit;
1834 		limit = 16;
1835 		if (i + limit > size)
1836 			limit = size - i;
1837 		dump_line(realobj, i, limit);
1838 	}
1839 }
1840 
1841 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1842 {
1843 	char *realobj;
1844 	int size, i;
1845 	int lines = 0;
1846 
1847 	realobj = (char *)objp + obj_offset(cachep);
1848 	size = obj_size(cachep);
1849 
1850 	for (i = 0; i < size; i++) {
1851 		char exp = POISON_FREE;
1852 		if (i == size - 1)
1853 			exp = POISON_END;
1854 		if (realobj[i] != exp) {
1855 			int limit;
1856 			/* Mismatch ! */
1857 			/* Print header */
1858 			if (lines == 0) {
1859 				printk(KERN_ERR
1860 					"Slab corruption: %s start=%p, len=%d\n",
1861 					cachep->name, realobj, size);
1862 				print_objinfo(cachep, objp, 0);
1863 			}
1864 			/* Hexdump the affected line */
1865 			i = (i / 16) * 16;
1866 			limit = 16;
1867 			if (i + limit > size)
1868 				limit = size - i;
1869 			dump_line(realobj, i, limit);
1870 			i += 16;
1871 			lines++;
1872 			/* Limit to 5 lines */
1873 			if (lines > 5)
1874 				break;
1875 		}
1876 	}
1877 	if (lines != 0) {
1878 		/* Print some data about the neighboring objects, if they
1879 		 * exist:
1880 		 */
1881 		struct slab *slabp = virt_to_slab(objp);
1882 		unsigned int objnr;
1883 
1884 		objnr = obj_to_index(cachep, slabp, objp);
1885 		if (objnr) {
1886 			objp = index_to_obj(cachep, slabp, objnr - 1);
1887 			realobj = (char *)objp + obj_offset(cachep);
1888 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1889 			       realobj, size);
1890 			print_objinfo(cachep, objp, 2);
1891 		}
1892 		if (objnr + 1 < cachep->num) {
1893 			objp = index_to_obj(cachep, slabp, objnr + 1);
1894 			realobj = (char *)objp + obj_offset(cachep);
1895 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1896 			       realobj, size);
1897 			print_objinfo(cachep, objp, 2);
1898 		}
1899 	}
1900 }
1901 #endif
1902 
1903 #if DEBUG
1904 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1905 {
1906 	int i;
1907 	for (i = 0; i < cachep->num; i++) {
1908 		void *objp = index_to_obj(cachep, slabp, i);
1909 
1910 		if (cachep->flags & SLAB_POISON) {
1911 #ifdef CONFIG_DEBUG_PAGEALLOC
1912 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1913 					OFF_SLAB(cachep))
1914 				kernel_map_pages(virt_to_page(objp),
1915 					cachep->buffer_size / PAGE_SIZE, 1);
1916 			else
1917 				check_poison_obj(cachep, objp);
1918 #else
1919 			check_poison_obj(cachep, objp);
1920 #endif
1921 		}
1922 		if (cachep->flags & SLAB_RED_ZONE) {
1923 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1924 				slab_error(cachep, "start of a freed object "
1925 					   "was overwritten");
1926 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1927 				slab_error(cachep, "end of a freed object "
1928 					   "was overwritten");
1929 		}
1930 	}
1931 }
1932 #else
1933 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1934 {
1935 }
1936 #endif
1937 
1938 /**
1939  * slab_destroy - destroy and release all objects in a slab
1940  * @cachep: cache pointer being destroyed
1941  * @slabp: slab pointer being destroyed
1942  *
1943  * Destroy all the objs in a slab, and release the mem back to the system.
1944  * Before calling, the slab must have been unlinked from the cache.  The
1945  * cache-lock is not held/needed.
1946  */
1947 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1948 {
1949 	void *addr = slabp->s_mem - slabp->colouroff;
1950 
1951 	slab_destroy_debugcheck(cachep, slabp);
1952 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1953 		struct slab_rcu *slab_rcu;
1954 
1955 		slab_rcu = (struct slab_rcu *)slabp;
1956 		slab_rcu->cachep = cachep;
1957 		slab_rcu->addr = addr;
1958 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1959 	} else {
1960 		kmem_freepages(cachep, addr);
1961 		if (OFF_SLAB(cachep))
1962 			kmem_cache_free(cachep->slabp_cache, slabp);
1963 	}
1964 }
1965 
1966 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1967 {
1968 	int i;
1969 	struct kmem_list3 *l3;
1970 
1971 	for_each_online_cpu(i)
1972 	    kfree(cachep->array[i]);
1973 
1974 	/* NUMA: free the list3 structures */
1975 	for_each_online_node(i) {
1976 		l3 = cachep->nodelists[i];
1977 		if (l3) {
1978 			kfree(l3->shared);
1979 			free_alien_cache(l3->alien);
1980 			kfree(l3);
1981 		}
1982 	}
1983 	kmem_cache_free(&cache_cache, cachep);
1984 }
1985 
1986 
1987 /**
1988  * calculate_slab_order - calculate size (page order) of slabs
1989  * @cachep: pointer to the cache that is being created
1990  * @size: size of objects to be created in this cache.
1991  * @align: required alignment for the objects.
1992  * @flags: slab allocation flags
1993  *
1994  * Also calculates the number of objects per slab.
1995  *
1996  * This could be made much more intelligent.  For now, try to avoid using
1997  * high order pages for slabs.  When the gfp() functions are more friendly
1998  * towards high-order requests, this should be changed.
1999  */
2000 static size_t calculate_slab_order(struct kmem_cache *cachep,
2001 			size_t size, size_t align, unsigned long flags)
2002 {
2003 	unsigned long offslab_limit;
2004 	size_t left_over = 0;
2005 	int gfporder;
2006 
2007 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2008 		unsigned int num;
2009 		size_t remainder;
2010 
2011 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2012 		if (!num)
2013 			continue;
2014 
2015 		if (flags & CFLGS_OFF_SLAB) {
2016 			/*
2017 			 * Max number of objs-per-slab for caches which
2018 			 * use off-slab slabs. Needed to avoid a possible
2019 			 * looping condition in cache_grow().
2020 			 */
2021 			offslab_limit = size - sizeof(struct slab);
2022 			offslab_limit /= sizeof(kmem_bufctl_t);
2023 
2024 			if (num > offslab_limit)
2025 				break;
2026 		}
2027 
2028 		/* Found something acceptable - save it away */
2029 		cachep->num = num;
2030 		cachep->gfporder = gfporder;
2031 		left_over = remainder;
2032 
2033 		/*
2034 		 * A VFS-reclaimable slab tends to have most allocations
2035 		 * as GFP_NOFS and we really don't want to have to be allocating
2036 		 * higher-order pages when we are unable to shrink dcache.
2037 		 */
2038 		if (flags & SLAB_RECLAIM_ACCOUNT)
2039 			break;
2040 
2041 		/*
2042 		 * Large number of objects is good, but very large slabs are
2043 		 * A large number of objects is good, but very large slabs are
2044 		 */
2045 		if (gfporder >= slab_break_gfp_order)
2046 			break;
2047 
2048 		/*
2049 		 * Acceptable internal fragmentation?
2050 		 */
2051 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2052 			break;
2053 	}
2054 	return left_over;
2055 }
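
/*
 * Worked example for the loop above (illustrative numbers only, assuming
 * 4096-byte pages and on-slab slab management): for 256-byte objects at
 * gfporder 0, cache_estimate() can fit roughly 15 objects once the
 * struct slab plus the kmem_bufctl_t array is accounted for, leaving only
 * a little over a hundred bytes of waste, so the internal-fragmentation
 * test passes and order 0 is kept.  An object larger than a page, on the
 * other hand, yields num == 0 at order 0 and pushes the loop on to a
 * higher order before anything fits.
 */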
2056 
2057 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
2058 {
2059 	if (g_cpucache_up == FULL)
2060 		return enable_cpucache(cachep);
2061 
2062 	if (g_cpucache_up == NONE) {
2063 		/*
2064 		 * Note: the first kmem_cache_create must create the cache
2065 		 * that's used by kmalloc(24), otherwise the creation of
2066 		 * further caches will BUG().
2067 		 */
2068 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2069 
2070 		/*
2071 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2072 		 * the first cache, then we need to set up all its list3s,
2073 		 * otherwise the creation of further caches will BUG().
2074 		 */
2075 		set_up_list3s(cachep, SIZE_AC);
2076 		if (INDEX_AC == INDEX_L3)
2077 			g_cpucache_up = PARTIAL_L3;
2078 		else
2079 			g_cpucache_up = PARTIAL_AC;
2080 	} else {
2081 		cachep->array[smp_processor_id()] =
2082 			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2083 
2084 		if (g_cpucache_up == PARTIAL_AC) {
2085 			set_up_list3s(cachep, SIZE_L3);
2086 			g_cpucache_up = PARTIAL_L3;
2087 		} else {
2088 			int node;
2089 			for_each_online_node(node) {
2090 				cachep->nodelists[node] =
2091 				    kmalloc_node(sizeof(struct kmem_list3),
2092 						GFP_KERNEL, node);
2093 				BUG_ON(!cachep->nodelists[node]);
2094 				kmem_list3_init(cachep->nodelists[node]);
2095 			}
2096 		}
2097 	}
2098 	cachep->nodelists[numa_node_id()]->next_reap =
2099 			jiffies + REAPTIMEOUT_LIST3 +
2100 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2101 
2102 	cpu_cache_get(cachep)->avail = 0;
2103 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2104 	cpu_cache_get(cachep)->batchcount = 1;
2105 	cpu_cache_get(cachep)->touched = 0;
2106 	cachep->batchcount = 1;
2107 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2108 	return 0;
2109 }
2110 
2111 /**
2112  * kmem_cache_create - Create a cache.
2113  * @name: A string which is used in /proc/slabinfo to identify this cache.
2114  * @size: The size of objects to be created in this cache.
2115  * @align: The required alignment for the objects.
2116  * @flags: SLAB flags
2117  * @ctor: A constructor for the objects.
2118  *
2119  * Returns a ptr to the cache on success, NULL on failure.
2120  * Cannot be called within an interrupt, but can be interrupted.
2121  * The @ctor is run when new pages are allocated by the cache.
2122  *
2123  * @name must be valid until the cache is destroyed. This implies that
2124  * the module calling this has to destroy the cache before getting unloaded.
2125  *
2126  * The flags are
2127  *
2128  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2129  * to catch references to uninitialised memory.
2130  *
2131  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2132  * for buffer overruns.
2133  *
2134  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2135  * cacheline.  This can be beneficial if you're counting cycles as closely
2136  * as davem.
2137  */
2138 struct kmem_cache *
2139 kmem_cache_create (const char *name, size_t size, size_t align,
2140 	unsigned long flags, void (*ctor)(void *))
2141 {
2142 	size_t left_over, slab_size, ralign;
2143 	struct kmem_cache *cachep = NULL, *pc;
2144 
2145 	/*
2146 	 * Sanity checks... these are all serious usage bugs.
2147 	 */
2148 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2149 	    size > KMALLOC_MAX_SIZE) {
2150 		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2151 				name);
2152 		BUG();
2153 	}
2154 
2155 	/*
2156 	 * We use cache_chain_mutex to ensure a consistent view of
2157 	 * cpu_online_map as well.  Please see cpuup_callback
2158 	 */
2159 	get_online_cpus();
2160 	mutex_lock(&cache_chain_mutex);
2161 
2162 	list_for_each_entry(pc, &cache_chain, next) {
2163 		char tmp;
2164 		int res;
2165 
2166 		/*
2167 		 * This happens when the module gets unloaded and doesn't
2168 		 * destroy its slab cache and no-one else reuses the vmalloc
2169 		 * area of the module.  Print a warning.
2170 		 */
2171 		res = probe_kernel_address(pc->name, tmp);
2172 		if (res) {
2173 			printk(KERN_ERR
2174 			       "SLAB: cache with size %d has lost its name\n",
2175 			       pc->buffer_size);
2176 			continue;
2177 		}
2178 
2179 		if (!strcmp(pc->name, name)) {
2180 			printk(KERN_ERR
2181 			       "kmem_cache_create: duplicate cache %s\n", name);
2182 			dump_stack();
2183 			goto oops;
2184 		}
2185 	}
2186 
2187 #if DEBUG
2188 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2189 #if FORCED_DEBUG
2190 	/*
2191 	 * Enable redzoning and last user accounting, except for caches with
2192 	 * large objects, if the increased size would increase the object size
2193 	 * above the next power of two: caches with object sizes just above a
2194 	 * power of two have a significant amount of internal fragmentation.
2195 	 */
2196 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2197 						2 * sizeof(unsigned long long)))
2198 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2199 	if (!(flags & SLAB_DESTROY_BY_RCU))
2200 		flags |= SLAB_POISON;
2201 #endif
2202 	if (flags & SLAB_DESTROY_BY_RCU)
2203 		BUG_ON(flags & SLAB_POISON);
2204 #endif
2205 	/*
2206 	 * Always check the flags; a caller might be expecting debug support
2207 	 * which isn't available.
2208 	 */
2209 	BUG_ON(flags & ~CREATE_MASK);
2210 
2211 	/*
2212 	 * Check that size is in terms of words.  This is needed to avoid
2213 	 * unaligned accesses for some archs when redzoning is used, and makes
2214 	 * sure any on-slab bufctls are also correctly aligned.
2215 	 */
2216 	if (size & (BYTES_PER_WORD - 1)) {
2217 		size += (BYTES_PER_WORD - 1);
2218 		size &= ~(BYTES_PER_WORD - 1);
2219 	}
2220 
2221 	/* calculate the final buffer alignment: */
2222 
2223 	/* 1) arch recommendation: can be overridden for debug */
2224 	if (flags & SLAB_HWCACHE_ALIGN) {
2225 		/*
2226 		 * Default alignment: as specified by the arch code.  Except if
2227 		 * an object is really small, then squeeze multiple objects into
2228 		 * one cacheline.
2229 		 */
2230 		ralign = cache_line_size();
2231 		while (size <= ralign / 2)
2232 			ralign /= 2;
2233 	} else {
2234 		ralign = BYTES_PER_WORD;
2235 	}
2236 
2237 	/*
2238 	 * Redzoning and user store require word alignment or possibly larger.
2239 	 * Note this will be overridden by architecture or caller mandated
2240 	 * alignment if either is greater than BYTES_PER_WORD.
2241 	 */
2242 	if (flags & SLAB_STORE_USER)
2243 		ralign = BYTES_PER_WORD;
2244 
2245 	if (flags & SLAB_RED_ZONE) {
2246 		ralign = REDZONE_ALIGN;
2247 		/* If redzoning, ensure that the second redzone is suitably
2248 		 * aligned, by adjusting the object size accordingly. */
2249 		size += REDZONE_ALIGN - 1;
2250 		size &= ~(REDZONE_ALIGN - 1);
2251 	}
2252 
2253 	/* 2) arch mandated alignment */
2254 	if (ralign < ARCH_SLAB_MINALIGN) {
2255 		ralign = ARCH_SLAB_MINALIGN;
2256 	}
2257 	/* 3) caller mandated alignment */
2258 	if (ralign < align) {
2259 		ralign = align;
2260 	}
2261 	/* disable debug if necessary */
2262 	if (ralign > __alignof__(unsigned long long))
2263 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2264 	/*
2265 	 * 4) Store it.
2266 	 */
2267 	align = ralign;
2268 
2269 	/* Get cache's description obj. */
2270 	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2271 	if (!cachep)
2272 		goto oops;
2273 
2274 #if DEBUG
2275 	cachep->obj_size = size;
2276 
2277 	/*
2278 	 * Both debugging options require word-alignment which is calculated
2279 	 * into align above.
2280 	 */
2281 	if (flags & SLAB_RED_ZONE) {
2282 		/* add space for red zone words */
2283 		cachep->obj_offset += sizeof(unsigned long long);
2284 		size += 2 * sizeof(unsigned long long);
2285 	}
2286 	if (flags & SLAB_STORE_USER) {
2287 		/* user store requires one word storage behind the end of
2288 		 * the real object. But if the second red zone needs to be
2289 		 * aligned to 64 bits, we must allow that much space.
2290 		 */
2291 		if (flags & SLAB_RED_ZONE)
2292 			size += REDZONE_ALIGN;
2293 		else
2294 			size += BYTES_PER_WORD;
2295 	}
2296 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2297 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2298 	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2299 		cachep->obj_offset += PAGE_SIZE - size;
2300 		size = PAGE_SIZE;
2301 	}
2302 #endif
2303 #endif
2304 
2305 	/*
2306 	 * Determine if the slab management is 'on' or 'off' slab.
2307 	 * (bootstrapping cannot cope with offslab caches so don't do
2308 	 * it too early on.)
2309 	 */
2310 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
2311 		/*
2312 		 * Size is large, assume best to place the slab management obj
2313 		 * off-slab (should allow better packing of objs).
2314 		 */
2315 		flags |= CFLGS_OFF_SLAB;
2316 
2317 	size = ALIGN(size, align);
2318 
2319 	left_over = calculate_slab_order(cachep, size, align, flags);
2320 
2321 	if (!cachep->num) {
2322 		printk(KERN_ERR
2323 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2324 		kmem_cache_free(&cache_cache, cachep);
2325 		cachep = NULL;
2326 		goto oops;
2327 	}
2328 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2329 			  + sizeof(struct slab), align);
2330 
2331 	/*
2332 	 * If the slab has been placed off-slab, and we have enough space then
2333 	 * move it on-slab. This is at the expense of any extra colouring.
2334 	 */
2335 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2336 		flags &= ~CFLGS_OFF_SLAB;
2337 		left_over -= slab_size;
2338 	}
2339 
2340 	if (flags & CFLGS_OFF_SLAB) {
2341 		/* really off slab. No need for manual alignment */
2342 		slab_size =
2343 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2344 	}
2345 
2346 	cachep->colour_off = cache_line_size();
2347 	/* Offset must be a multiple of the alignment. */
2348 	if (cachep->colour_off < align)
2349 		cachep->colour_off = align;
2350 	cachep->colour = left_over / cachep->colour_off;
2351 	cachep->slab_size = slab_size;
2352 	cachep->flags = flags;
2353 	cachep->gfpflags = 0;
2354 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2355 		cachep->gfpflags |= GFP_DMA;
2356 	cachep->buffer_size = size;
2357 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2358 
2359 	if (flags & CFLGS_OFF_SLAB) {
2360 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2361 		/*
2362 		 * This is a possibility for one of the malloc_sizes caches.
2363 		 * But since we go off slab only for object size greater than
2364 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2365 		 * this should not happen at all.
2366 		 * But leave a BUG_ON for some lucky dude.
2367 		 */
2368 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2369 	}
2370 	cachep->ctor = ctor;
2371 	cachep->name = name;
2372 
2373 	if (setup_cpu_cache(cachep)) {
2374 		__kmem_cache_destroy(cachep);
2375 		cachep = NULL;
2376 		goto oops;
2377 	}
2378 
2379 	/* cache setup completed, link it into the list */
2380 	list_add(&cachep->next, &cache_chain);
2381 oops:
2382 	if (!cachep && (flags & SLAB_PANIC))
2383 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2384 		      name);
2385 	mutex_unlock(&cache_chain_mutex);
2386 	put_online_cpus();
2387 	return cachep;
2388 }
2389 EXPORT_SYMBOL(kmem_cache_create);
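
/*
 * Typical usage, as a sketch only (struct foo, foo_cache and foo_init are
 * hypothetical names, not part of this file):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo_cache",
 *					      sizeof(struct foo), 0,
 *					      SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Objects are then obtained with kmem_cache_alloc(foo_cache, GFP_KERNEL)
 * and returned with kmem_cache_free(foo_cache, obj).
 */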
2390 
2391 #if DEBUG
2392 static void check_irq_off(void)
2393 {
2394 	BUG_ON(!irqs_disabled());
2395 }
2396 
2397 static void check_irq_on(void)
2398 {
2399 	BUG_ON(irqs_disabled());
2400 }
2401 
2402 static void check_spinlock_acquired(struct kmem_cache *cachep)
2403 {
2404 #ifdef CONFIG_SMP
2405 	check_irq_off();
2406 	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2407 #endif
2408 }
2409 
2410 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2411 {
2412 #ifdef CONFIG_SMP
2413 	check_irq_off();
2414 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2415 #endif
2416 }
2417 
2418 #else
2419 #define check_irq_off()	do { } while(0)
2420 #define check_irq_on()	do { } while(0)
2421 #define check_spinlock_acquired(x) do { } while(0)
2422 #define check_spinlock_acquired_node(x, y) do { } while(0)
2423 #endif
2424 
2425 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2426 			struct array_cache *ac,
2427 			int force, int node);
2428 
2429 static void do_drain(void *arg)
2430 {
2431 	struct kmem_cache *cachep = arg;
2432 	struct array_cache *ac;
2433 	int node = numa_node_id();
2434 
2435 	check_irq_off();
2436 	ac = cpu_cache_get(cachep);
2437 	spin_lock(&cachep->nodelists[node]->list_lock);
2438 	free_block(cachep, ac->entry, ac->avail, node);
2439 	spin_unlock(&cachep->nodelists[node]->list_lock);
2440 	ac->avail = 0;
2441 }
2442 
2443 static void drain_cpu_caches(struct kmem_cache *cachep)
2444 {
2445 	struct kmem_list3 *l3;
2446 	int node;
2447 
2448 	on_each_cpu(do_drain, cachep, 1);
2449 	check_irq_on();
2450 	for_each_online_node(node) {
2451 		l3 = cachep->nodelists[node];
2452 		if (l3 && l3->alien)
2453 			drain_alien_cache(cachep, l3->alien);
2454 	}
2455 
2456 	for_each_online_node(node) {
2457 		l3 = cachep->nodelists[node];
2458 		if (l3)
2459 			drain_array(cachep, l3, l3->shared, 1, node);
2460 	}
2461 }
2462 
2463 /*
2464  * Remove slabs from the list of free slabs.
2465  * Specify the number of slabs to drain in tofree.
2466  *
2467  * Returns the actual number of slabs released.
2468  */
2469 static int drain_freelist(struct kmem_cache *cache,
2470 			struct kmem_list3 *l3, int tofree)
2471 {
2472 	struct list_head *p;
2473 	int nr_freed;
2474 	struct slab *slabp;
2475 
2476 	nr_freed = 0;
2477 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2478 
2479 		spin_lock_irq(&l3->list_lock);
2480 		p = l3->slabs_free.prev;
2481 		if (p == &l3->slabs_free) {
2482 			spin_unlock_irq(&l3->list_lock);
2483 			goto out;
2484 		}
2485 
2486 		slabp = list_entry(p, struct slab, list);
2487 #if DEBUG
2488 		BUG_ON(slabp->inuse);
2489 #endif
2490 		list_del(&slabp->list);
2491 		/*
2492 		 * Safe to drop the lock. The slab is no longer linked
2493 		 * to the cache.
2494 		 */
2495 		l3->free_objects -= cache->num;
2496 		spin_unlock_irq(&l3->list_lock);
2497 		slab_destroy(cache, slabp);
2498 		nr_freed++;
2499 	}
2500 out:
2501 	return nr_freed;
2502 }
2503 
2504 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2505 static int __cache_shrink(struct kmem_cache *cachep)
2506 {
2507 	int ret = 0, i = 0;
2508 	struct kmem_list3 *l3;
2509 
2510 	drain_cpu_caches(cachep);
2511 
2512 	check_irq_on();
2513 	for_each_online_node(i) {
2514 		l3 = cachep->nodelists[i];
2515 		if (!l3)
2516 			continue;
2517 
2518 		drain_freelist(cachep, l3, l3->free_objects);
2519 
2520 		ret += !list_empty(&l3->slabs_full) ||
2521 			!list_empty(&l3->slabs_partial);
2522 	}
2523 	return (ret ? 1 : 0);
2524 }
2525 
2526 /**
2527  * kmem_cache_shrink - Shrink a cache.
2528  * @cachep: The cache to shrink.
2529  *
2530  * Releases as many slabs as possible for a cache.
2531  * To help debugging, a zero exit status indicates all slabs were released.
2532  */
2533 int kmem_cache_shrink(struct kmem_cache *cachep)
2534 {
2535 	int ret;
2536 	BUG_ON(!cachep || in_interrupt());
2537 
2538 	get_online_cpus();
2539 	mutex_lock(&cache_chain_mutex);
2540 	ret = __cache_shrink(cachep);
2541 	mutex_unlock(&cache_chain_mutex);
2542 	put_online_cpus();
2543 	return ret;
2544 }
2545 EXPORT_SYMBOL(kmem_cache_shrink);
2546 
2547 /**
2548  * kmem_cache_destroy - delete a cache
2549  * @cachep: the cache to destroy
2550  *
2551  * Remove a &struct kmem_cache object from the slab cache.
2552  *
2553  * It is expected this function will be called by a module when it is
2554  * unloaded.  This will remove the cache completely, and avoid a duplicate
2555  * cache being allocated each time a module is loaded and unloaded, if the
2556  * module doesn't have persistent in-kernel storage across loads and unloads.
2557  *
2558  * The cache must be empty before calling this function.
2559  *
2560  * The caller must guarantee that no one will allocate memory from the cache
2561  * during the kmem_cache_destroy().
2562  */
2563 void kmem_cache_destroy(struct kmem_cache *cachep)
2564 {
2565 	BUG_ON(!cachep || in_interrupt());
2566 
2567 	/* Find the cache in the chain of caches. */
2568 	get_online_cpus();
2569 	mutex_lock(&cache_chain_mutex);
2570 	/*
2571 	 * the chain is never empty, cache_cache is never destroyed
2572 	 */
2573 	list_del(&cachep->next);
2574 	if (__cache_shrink(cachep)) {
2575 		slab_error(cachep, "Can't free all objects");
2576 		list_add(&cachep->next, &cache_chain);
2577 		mutex_unlock(&cache_chain_mutex);
2578 		put_online_cpus();
2579 		return;
2580 	}
2581 
2582 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2583 		synchronize_rcu();
2584 
2585 	__kmem_cache_destroy(cachep);
2586 	mutex_unlock(&cache_chain_mutex);
2587 	put_online_cpus();
2588 }
2589 EXPORT_SYMBOL(kmem_cache_destroy);
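
/*
 * Sketch of the expected teardown order in a module exit path (foo_cache
 * is hypothetical): every object must already have been returned with
 * kmem_cache_free() before the cache is destroyed, otherwise
 * __cache_shrink() above reports "Can't free all objects" and the cache
 * is put back on the chain:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */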
2590 
2591 /*
2592  * Get the memory for a slab management obj.
2593  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2594  * always come from malloc_sizes caches.  The slab descriptor cannot
2595  * come from the same cache which is getting created because,
2596  * when we are searching for an appropriate cache for these
2597  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2598  * If we are creating a malloc_sizes cache here, it would not be visible to
2599  * kmem_find_general_cachep() until the initialization is complete.
2600  * Hence slabp_cache cannot be the same as the cache that is being created.
2601  */
2602 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2603 				   int colour_off, gfp_t local_flags,
2604 				   int nodeid)
2605 {
2606 	struct slab *slabp;
2607 
2608 	if (OFF_SLAB(cachep)) {
2609 		/* Slab management obj is off-slab. */
2610 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2611 					      local_flags & ~GFP_THISNODE, nodeid);
2612 		if (!slabp)
2613 			return NULL;
2614 	} else {
2615 		slabp = objp + colour_off;
2616 		colour_off += cachep->slab_size;
2617 	}
2618 	slabp->inuse = 0;
2619 	slabp->colouroff = colour_off;
2620 	slabp->s_mem = objp + colour_off;
2621 	slabp->nodeid = nodeid;
2622 	slabp->free = 0;
2623 	return slabp;
2624 }
2625 
2626 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2627 {
2628 	return (kmem_bufctl_t *) (slabp + 1);
2629 }
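
/*
 * Worked example of the bufctl free list: for a slab with num == 4
 * objects, cache_init_objs() below leaves
 *
 *	slab_bufctl(slabp) = { 1, 2, 3, BUFCTL_END }   and   slabp->free = 0
 *
 * slab_get_obj() hands out object 0 and advances slabp->free to
 * slab_bufctl(slabp)[0] == 1; slab_put_obj() pushes a freed object back by
 * making its bufctl entry point at the old slabp->free.  The array is thus
 * a singly linked list of free object indices threaded through the slab
 * management area.
 */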
2630 
2631 static void cache_init_objs(struct kmem_cache *cachep,
2632 			    struct slab *slabp)
2633 {
2634 	int i;
2635 
2636 	for (i = 0; i < cachep->num; i++) {
2637 		void *objp = index_to_obj(cachep, slabp, i);
2638 #if DEBUG
2639 		/* need to poison the objs? */
2640 		if (cachep->flags & SLAB_POISON)
2641 			poison_obj(cachep, objp, POISON_FREE);
2642 		if (cachep->flags & SLAB_STORE_USER)
2643 			*dbg_userword(cachep, objp) = NULL;
2644 
2645 		if (cachep->flags & SLAB_RED_ZONE) {
2646 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2647 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2648 		}
2649 		/*
2650 		 * Constructors are not allowed to allocate memory from the same
2651 		 * cache which they are a constructor for.  Otherwise, deadlock.
2652 		 * They must also be threaded.
2653 		 */
2654 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2655 			cachep->ctor(objp + obj_offset(cachep));
2656 
2657 		if (cachep->flags & SLAB_RED_ZONE) {
2658 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2659 				slab_error(cachep, "constructor overwrote the"
2660 					   " end of an object");
2661 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2662 				slab_error(cachep, "constructor overwrote the"
2663 					   " start of an object");
2664 		}
2665 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2666 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2667 			kernel_map_pages(virt_to_page(objp),
2668 					 cachep->buffer_size / PAGE_SIZE, 0);
2669 #else
2670 		if (cachep->ctor)
2671 			cachep->ctor(objp);
2672 #endif
2673 		slab_bufctl(slabp)[i] = i + 1;
2674 	}
2675 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2676 }
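
/*
 * A minimal constructor sketch (struct foo and foo_ctor are hypothetical,
 * not part of this file).  Per the comment above, it runs once per object
 * when a new slab is grown, not on every allocation; it must not allocate
 * from the cache it constructs for, and objects must be handed back to
 * kmem_cache_free() in this constructed state:
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *	}
 */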
2677 
2678 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2679 {
2680 	if (CONFIG_ZONE_DMA_FLAG) {
2681 		if (flags & GFP_DMA)
2682 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2683 		else
2684 			BUG_ON(cachep->gfpflags & GFP_DMA);
2685 	}
2686 }
2687 
2688 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2689 				int nodeid)
2690 {
2691 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2692 	kmem_bufctl_t next;
2693 
2694 	slabp->inuse++;
2695 	next = slab_bufctl(slabp)[slabp->free];
2696 #if DEBUG
2697 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2698 	WARN_ON(slabp->nodeid != nodeid);
2699 #endif
2700 	slabp->free = next;
2701 
2702 	return objp;
2703 }
2704 
2705 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2706 				void *objp, int nodeid)
2707 {
2708 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2709 
2710 #if DEBUG
2711 	/* Verify that the slab belongs to the intended node */
2712 	WARN_ON(slabp->nodeid != nodeid);
2713 
2714 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2715 		printk(KERN_ERR "slab: double free detected in cache "
2716 				"'%s', objp %p\n", cachep->name, objp);
2717 		BUG();
2718 	}
2719 #endif
2720 	slab_bufctl(slabp)[objnr] = slabp->free;
2721 	slabp->free = objnr;
2722 	slabp->inuse--;
2723 }
2724 
2725 /*
2726  * Map pages beginning at addr to the given cache and slab. This is required
2727  * for the slab allocator to be able to look up the cache and slab of a
2728  * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2729  */
2730 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2731 			   void *addr)
2732 {
2733 	int nr_pages;
2734 	struct page *page;
2735 
2736 	page = virt_to_page(addr);
2737 
2738 	nr_pages = 1;
2739 	if (likely(!PageCompound(page)))
2740 		nr_pages <<= cache->gfporder;
2741 
2742 	do {
2743 		page_set_cache(page, cache);
2744 		page_set_slab(page, slab);
2745 		page++;
2746 	} while (--nr_pages);
2747 }
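
/*
 * The mapping set up above is what later lets kfree() and the debug code
 * recover the owning cache and slab from a bare pointer, roughly (sketch):
 *
 *	page  = virt_to_head_page(objp);
 *	cache = page_get_cache(page);
 *	slabp = page_get_slab(page);
 *
 * as used by virt_to_cache()/virt_to_slab() and cache_free_debugcheck().
 */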
2748 
2749 /*
2750  * Grow (by 1) the number of slabs within a cache.  This is called by
2751  * kmem_cache_alloc() when there are no active objs left in a cache.
2752  */
2753 static int cache_grow(struct kmem_cache *cachep,
2754 		gfp_t flags, int nodeid, void *objp)
2755 {
2756 	struct slab *slabp;
2757 	size_t offset;
2758 	gfp_t local_flags;
2759 	struct kmem_list3 *l3;
2760 
2761 	/*
2762 	 * Be lazy and only check for valid flags here,  keeping it out of the
2763 	 * critical path in kmem_cache_alloc().
2764 	 */
2765 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2766 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2767 
2768 	/* Take the l3 list lock to change the colour_next on this node */
2769 	check_irq_off();
2770 	l3 = cachep->nodelists[nodeid];
2771 	spin_lock(&l3->list_lock);
2772 
2773 	/* Get the colour for the slab, and calculate the next value. */
2774 	offset = l3->colour_next;
2775 	l3->colour_next++;
2776 	if (l3->colour_next >= cachep->colour)
2777 		l3->colour_next = 0;
2778 	spin_unlock(&l3->list_lock);
2779 
2780 	offset *= cachep->colour_off;
2781 
2782 	if (local_flags & __GFP_WAIT)
2783 		local_irq_enable();
2784 
2785 	/*
2786 	 * The test for missing atomic flag is performed here, rather than
2787 	 * the more obvious place, simply to reduce the critical path length
2788 	 * in kmem_cache_alloc(). If a caller is seriously misbehaving they
2789 	 * will eventually be caught here (where it matters).
2790 	 */
2791 	kmem_flagcheck(cachep, flags);
2792 
2793 	/*
2794 	 * Get mem for the objs.  Attempt to allocate a physical page from
2795 	 * 'nodeid'.
2796 	 */
2797 	if (!objp)
2798 		objp = kmem_getpages(cachep, local_flags, nodeid);
2799 	if (!objp)
2800 		goto failed;
2801 
2802 	/* Get slab management. */
2803 	slabp = alloc_slabmgmt(cachep, objp, offset,
2804 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2805 	if (!slabp)
2806 		goto opps1;
2807 
2808 	slab_map_pages(cachep, slabp, objp);
2809 
2810 	cache_init_objs(cachep, slabp);
2811 
2812 	if (local_flags & __GFP_WAIT)
2813 		local_irq_disable();
2814 	check_irq_off();
2815 	spin_lock(&l3->list_lock);
2816 
2817 	/* Make slab active. */
2818 	list_add_tail(&slabp->list, &(l3->slabs_free));
2819 	STATS_INC_GROWN(cachep);
2820 	l3->free_objects += cachep->num;
2821 	spin_unlock(&l3->list_lock);
2822 	return 1;
2823 opps1:
2824 	kmem_freepages(cachep, objp);
2825 failed:
2826 	if (local_flags & __GFP_WAIT)
2827 		local_irq_disable();
2828 	return 0;
2829 }
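
/*
 * Slab colouring, by way of example (illustrative numbers): with
 * colour_off == 64 (one cache line) and cachep->colour == 4, successive
 * slabs grown on a node start their objects at offsets 0, 64, 128, 192,
 * 0, ... from the page boundary, so the same object index in different
 * slabs does not always compete for the same cache lines.
 */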
2830 
2831 #if DEBUG
2832 
2833 /*
2834  * Perform extra freeing checks:
2835  * - detect bad pointers.
2836  * - POISON/RED_ZONE checking
2837  */
2838 static void kfree_debugcheck(const void *objp)
2839 {
2840 	if (!virt_addr_valid(objp)) {
2841 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2842 		       (unsigned long)objp);
2843 		BUG();
2844 	}
2845 }
2846 
2847 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2848 {
2849 	unsigned long long redzone1, redzone2;
2850 
2851 	redzone1 = *dbg_redzone1(cache, obj);
2852 	redzone2 = *dbg_redzone2(cache, obj);
2853 
2854 	/*
2855 	 * Redzone is ok.
2856 	 */
2857 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2858 		return;
2859 
2860 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2861 		slab_error(cache, "double free detected");
2862 	else
2863 		slab_error(cache, "memory outside object was overwritten");
2864 
2865 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2866 			obj, redzone1, redzone2);
2867 }
2868 
2869 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2870 				   void *caller)
2871 {
2872 	struct page *page;
2873 	unsigned int objnr;
2874 	struct slab *slabp;
2875 
2876 	BUG_ON(virt_to_cache(objp) != cachep);
2877 
2878 	objp -= obj_offset(cachep);
2879 	kfree_debugcheck(objp);
2880 	page = virt_to_head_page(objp);
2881 
2882 	slabp = page_get_slab(page);
2883 
2884 	if (cachep->flags & SLAB_RED_ZONE) {
2885 		verify_redzone_free(cachep, objp);
2886 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2887 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2888 	}
2889 	if (cachep->flags & SLAB_STORE_USER)
2890 		*dbg_userword(cachep, objp) = caller;
2891 
2892 	objnr = obj_to_index(cachep, slabp, objp);
2893 
2894 	BUG_ON(objnr >= cachep->num);
2895 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2896 
2897 #ifdef CONFIG_DEBUG_SLAB_LEAK
2898 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2899 #endif
2900 	if (cachep->flags & SLAB_POISON) {
2901 #ifdef CONFIG_DEBUG_PAGEALLOC
2902 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2903 			store_stackinfo(cachep, objp, (unsigned long)caller);
2904 			kernel_map_pages(virt_to_page(objp),
2905 					 cachep->buffer_size / PAGE_SIZE, 0);
2906 		} else {
2907 			poison_obj(cachep, objp, POISON_FREE);
2908 		}
2909 #else
2910 		poison_obj(cachep, objp, POISON_FREE);
2911 #endif
2912 	}
2913 	return objp;
2914 }
2915 
2916 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2917 {
2918 	kmem_bufctl_t i;
2919 	int entries = 0;
2920 
2921 	/* Check slab's freelist to see if this obj is there. */
2922 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2923 		entries++;
2924 		if (entries > cachep->num || i >= cachep->num)
2925 			goto bad;
2926 	}
2927 	if (entries != cachep->num - slabp->inuse) {
2928 bad:
2929 		printk(KERN_ERR "slab: Internal list corruption detected in "
2930 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2931 			cachep->name, cachep->num, slabp, slabp->inuse);
2932 		for (i = 0;
2933 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2934 		     i++) {
2935 			if (i % 16 == 0)
2936 				printk("\n%03x:", i);
2937 			printk(" %02x", ((unsigned char *)slabp)[i]);
2938 		}
2939 		printk("\n");
2940 		BUG();
2941 	}
2942 }
2943 #else
2944 #define kfree_debugcheck(x) do { } while(0)
2945 #define cache_free_debugcheck(x,objp,z) (objp)
2946 #define check_slabp(x,y) do { } while(0)
2947 #endif
2948 
2949 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2950 {
2951 	int batchcount;
2952 	struct kmem_list3 *l3;
2953 	struct array_cache *ac;
2954 	int node;
2955 
2956 retry:
2957 	check_irq_off();
2958 	node = numa_node_id();
2959 	ac = cpu_cache_get(cachep);
2960 	batchcount = ac->batchcount;
2961 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2962 		/*
2963 		 * If there was little recent activity on this cache, then
2964 		 * perform only a partial refill.  Otherwise we could generate
2965 		 * refill bouncing.
2966 		 */
2967 		batchcount = BATCHREFILL_LIMIT;
2968 	}
2969 	l3 = cachep->nodelists[node];
2970 
2971 	BUG_ON(ac->avail > 0 || !l3);
2972 	spin_lock(&l3->list_lock);
2973 
2974 	/* See if we can refill from the shared array */
2975 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2976 		goto alloc_done;
2977 
2978 	while (batchcount > 0) {
2979 		struct list_head *entry;
2980 		struct slab *slabp;
2981 		/* Get the slab the allocation is to come from. */
2982 		entry = l3->slabs_partial.next;
2983 		if (entry == &l3->slabs_partial) {
2984 			l3->free_touched = 1;
2985 			entry = l3->slabs_free.next;
2986 			if (entry == &l3->slabs_free)
2987 				goto must_grow;
2988 		}
2989 
2990 		slabp = list_entry(entry, struct slab, list);
2991 		check_slabp(cachep, slabp);
2992 		check_spinlock_acquired(cachep);
2993 
2994 		/*
2995 		 * The slab was either on partial or free list so
2996 		 * there must be at least one object available for
2997 		 * allocation.
2998 		 */
2999 		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
3000 
3001 		while (slabp->inuse < cachep->num && batchcount--) {
3002 			STATS_INC_ALLOCED(cachep);
3003 			STATS_INC_ACTIVE(cachep);
3004 			STATS_SET_HIGH(cachep);
3005 
3006 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3007 							    node);
3008 		}
3009 		check_slabp(cachep, slabp);
3010 
3011 		/* move slabp to correct slabp list: */
3012 		list_del(&slabp->list);
3013 		if (slabp->free == BUFCTL_END)
3014 			list_add(&slabp->list, &l3->slabs_full);
3015 		else
3016 			list_add(&slabp->list, &l3->slabs_partial);
3017 	}
3018 
3019 must_grow:
3020 	l3->free_objects -= ac->avail;
3021 alloc_done:
3022 	spin_unlock(&l3->list_lock);
3023 
3024 	if (unlikely(!ac->avail)) {
3025 		int x;
3026 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3027 
3028 		/* cache_grow can reenable interrupts, then ac could change. */
3029 		ac = cpu_cache_get(cachep);
3030 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3031 			return NULL;
3032 
3033 		if (!ac->avail)		/* objects refilled by interrupt? */
3034 			goto retry;
3035 	}
3036 	ac->touched = 1;
3037 	return ac->entry[--ac->avail];
3038 }
3039 
3040 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3041 						gfp_t flags)
3042 {
3043 	might_sleep_if(flags & __GFP_WAIT);
3044 #if DEBUG
3045 	kmem_flagcheck(cachep, flags);
3046 #endif
3047 }
3048 
3049 #if DEBUG
3050 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3051 				gfp_t flags, void *objp, void *caller)
3052 {
3053 	if (!objp)
3054 		return objp;
3055 	if (cachep->flags & SLAB_POISON) {
3056 #ifdef CONFIG_DEBUG_PAGEALLOC
3057 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3058 			kernel_map_pages(virt_to_page(objp),
3059 					 cachep->buffer_size / PAGE_SIZE, 1);
3060 		else
3061 			check_poison_obj(cachep, objp);
3062 #else
3063 		check_poison_obj(cachep, objp);
3064 #endif
3065 		poison_obj(cachep, objp, POISON_INUSE);
3066 	}
3067 	if (cachep->flags & SLAB_STORE_USER)
3068 		*dbg_userword(cachep, objp) = caller;
3069 
3070 	if (cachep->flags & SLAB_RED_ZONE) {
3071 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3072 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3073 			slab_error(cachep, "double free, or memory outside"
3074 						" object was overwritten");
3075 			printk(KERN_ERR
3076 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3077 				objp, *dbg_redzone1(cachep, objp),
3078 				*dbg_redzone2(cachep, objp));
3079 		}
3080 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3081 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3082 	}
3083 #ifdef CONFIG_DEBUG_SLAB_LEAK
3084 	{
3085 		struct slab *slabp;
3086 		unsigned objnr;
3087 
3088 		slabp = page_get_slab(virt_to_head_page(objp));
3089 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3090 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3091 	}
3092 #endif
3093 	objp += obj_offset(cachep);
3094 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3095 		cachep->ctor(objp);
3096 #if ARCH_SLAB_MINALIGN
3097 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3098 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3099 		       objp, ARCH_SLAB_MINALIGN);
3100 	}
3101 #endif
3102 	return objp;
3103 }
3104 #else
3105 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3106 #endif
3107 
3108 #ifdef CONFIG_FAILSLAB
3109 
3110 static struct failslab_attr {
3111 
3112 	struct fault_attr attr;
3113 
3114 	u32 ignore_gfp_wait;
3115 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3116 	struct dentry *ignore_gfp_wait_file;
3117 #endif
3118 
3119 } failslab = {
3120 	.attr = FAULT_ATTR_INITIALIZER,
3121 	.ignore_gfp_wait = 1,
3122 };
3123 
3124 static int __init setup_failslab(char *str)
3125 {
3126 	return setup_fault_attr(&failslab.attr, str);
3127 }
3128 __setup("failslab=", setup_failslab);
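
/*
 * The boot parameter parsed above uses the generic fault-injection format
 * (believed to be "failslab=<interval>,<probability>,<space>,<times>"; see
 * Documentation/fault-injection/ for the authoritative description).  With
 * CONFIG_FAULT_INJECTION_DEBUG_FS the same knobs also appear under the
 * "failslab" debugfs directory created below.
 */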
3129 
3130 static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3131 {
3132 	if (cachep == &cache_cache)
3133 		return 0;
3134 	if (flags & __GFP_NOFAIL)
3135 		return 0;
3136 	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
3137 		return 0;
3138 
3139 	return should_fail(&failslab.attr, obj_size(cachep));
3140 }
3141 
3142 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3143 
3144 static int __init failslab_debugfs(void)
3145 {
3146 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
3147 	struct dentry *dir;
3148 	int err;
3149 
3150 	err = init_fault_attr_dentries(&failslab.attr, "failslab");
3151 	if (err)
3152 		return err;
3153 	dir = failslab.attr.dentries.dir;
3154 
3155 	failslab.ignore_gfp_wait_file =
3156 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
3157 				      &failslab.ignore_gfp_wait);
3158 
3159 	if (!failslab.ignore_gfp_wait_file) {
3160 		err = -ENOMEM;
3161 		debugfs_remove(failslab.ignore_gfp_wait_file);
3162 		cleanup_fault_attr_dentries(&failslab.attr);
3163 	}
3164 
3165 	return err;
3166 }
3167 
3168 late_initcall(failslab_debugfs);
3169 
3170 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3171 
3172 #else /* CONFIG_FAILSLAB */
3173 
3174 static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3175 {
3176 	return 0;
3177 }
3178 
3179 #endif /* CONFIG_FAILSLAB */
3180 
3181 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3182 {
3183 	void *objp;
3184 	struct array_cache *ac;
3185 
3186 	check_irq_off();
3187 
3188 	ac = cpu_cache_get(cachep);
3189 	if (likely(ac->avail)) {
3190 		STATS_INC_ALLOCHIT(cachep);
3191 		ac->touched = 1;
3192 		objp = ac->entry[--ac->avail];
3193 	} else {
3194 		STATS_INC_ALLOCMISS(cachep);
3195 		objp = cache_alloc_refill(cachep, flags);
3196 	}
3197 	return objp;
3198 }
3199 
3200 #ifdef CONFIG_NUMA
3201 /*
3202  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3203  *
3204  * If we are in_interrupt, then process context, including cpusets and
3205  * mempolicy, may not apply and should not be used for allocation policy.
3206  */
3207 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3208 {
3209 	int nid_alloc, nid_here;
3210 
3211 	if (in_interrupt() || (flags & __GFP_THISNODE))
3212 		return NULL;
3213 	nid_alloc = nid_here = numa_node_id();
3214 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3215 		nid_alloc = cpuset_mem_spread_node();
3216 	else if (current->mempolicy)
3217 		nid_alloc = slab_node(current->mempolicy);
3218 	if (nid_alloc != nid_here)
3219 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3220 	return NULL;
3221 }
3222 
3223 /*
3224  * Fallback function used if no memory was available and there are no objects
3225  * on a certain node, and falling back is permitted. First we scan all the
3226  * available nodelists for available objects. If that fails then we
3227  * perform an allocation without specifying a node. This allows the page
3228  * allocator to do its reclaim / fallback magic. We then insert the
3229  * slab into the proper nodelist and then allocate from it.
3230  */
3231 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3232 {
3233 	struct zonelist *zonelist;
3234 	gfp_t local_flags;
3235 	struct zoneref *z;
3236 	struct zone *zone;
3237 	enum zone_type high_zoneidx = gfp_zone(flags);
3238 	void *obj = NULL;
3239 	int nid;
3240 
3241 	if (flags & __GFP_THISNODE)
3242 		return NULL;
3243 
3244 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
3245 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3246 
3247 retry:
3248 	/*
3249 	 * Look through allowed nodes for objects available
3250 	 * from existing per node queues.
3251 	 */
3252 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3253 		nid = zone_to_nid(zone);
3254 
3255 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3256 			cache->nodelists[nid] &&
3257 			cache->nodelists[nid]->free_objects) {
3258 				obj = ____cache_alloc_node(cache,
3259 					flags | GFP_THISNODE, nid);
3260 				if (obj)
3261 					break;
3262 		}
3263 	}
3264 
3265 	if (!obj) {
3266 		/*
3267 		 * This allocation will be performed within the constraints
3268 		 * of the current cpuset / memory policy requirements.
3269 		 * We may trigger various forms of reclaim on the allowed
3270 		 * set and go into memory reserves if necessary.
3271 		 */
3272 		if (local_flags & __GFP_WAIT)
3273 			local_irq_enable();
3274 		kmem_flagcheck(cache, flags);
3275 		obj = kmem_getpages(cache, local_flags, -1);
3276 		if (local_flags & __GFP_WAIT)
3277 			local_irq_disable();
3278 		if (obj) {
3279 			/*
3280 			 * Insert into the appropriate per node queues
3281 			 */
3282 			nid = page_to_nid(virt_to_page(obj));
3283 			if (cache_grow(cache, flags, nid, obj)) {
3284 				obj = ____cache_alloc_node(cache,
3285 					flags | GFP_THISNODE, nid);
3286 				if (!obj)
3287 					/*
3288 					 * Another processor may allocate the
3289 					 * objects in the slab since we are
3290 					 * not holding any locks.
3291 					 */
3292 					goto retry;
3293 			} else {
3294 				/* cache_grow already freed obj */
3295 				obj = NULL;
3296 			}
3297 		}
3298 	}
3299 	return obj;
3300 }
3301 
3302 /*
3303  * An interface to enable slab creation on nodeid
3304  */
3305 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3306 				int nodeid)
3307 {
3308 	struct list_head *entry;
3309 	struct slab *slabp;
3310 	struct kmem_list3 *l3;
3311 	void *obj;
3312 	int x;
3313 
3314 	l3 = cachep->nodelists[nodeid];
3315 	BUG_ON(!l3);
3316 
3317 retry:
3318 	check_irq_off();
3319 	spin_lock(&l3->list_lock);
3320 	entry = l3->slabs_partial.next;
3321 	if (entry == &l3->slabs_partial) {
3322 		l3->free_touched = 1;
3323 		entry = l3->slabs_free.next;
3324 		if (entry == &l3->slabs_free)
3325 			goto must_grow;
3326 	}
3327 
3328 	slabp = list_entry(entry, struct slab, list);
3329 	check_spinlock_acquired_node(cachep, nodeid);
3330 	check_slabp(cachep, slabp);
3331 
3332 	STATS_INC_NODEALLOCS(cachep);
3333 	STATS_INC_ACTIVE(cachep);
3334 	STATS_SET_HIGH(cachep);
3335 
3336 	BUG_ON(slabp->inuse == cachep->num);
3337 
3338 	obj = slab_get_obj(cachep, slabp, nodeid);
3339 	check_slabp(cachep, slabp);
3340 	l3->free_objects--;
3341 	/* move slabp to correct slabp list: */
3342 	list_del(&slabp->list);
3343 
3344 	if (slabp->free == BUFCTL_END)
3345 		list_add(&slabp->list, &l3->slabs_full);
3346 	else
3347 		list_add(&slabp->list, &l3->slabs_partial);
3348 
3349 	spin_unlock(&l3->list_lock);
3350 	goto done;
3351 
3352 must_grow:
3353 	spin_unlock(&l3->list_lock);
3354 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3355 	if (x)
3356 		goto retry;
3357 
3358 	return fallback_alloc(cachep, flags);
3359 
3360 done:
3361 	return obj;
3362 }
3363 
3364 /**
3365  * kmem_cache_alloc_node - Allocate an object on the specified node
3366  * @cachep: The cache to allocate from.
3367  * @flags: See kmalloc().
3368  * @nodeid: node number of the target node.
3369  * @caller: return address of caller, used for debug information
3370  *
3371  * Identical to kmem_cache_alloc but it will allocate memory on the given
3372  * node, which can improve the performance for cpu bound structures.
3373  *
3374  * Fallback to other node is possible if __GFP_THISNODE is not set.
3375  */
3376 static __always_inline void *
3377 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3378 		   void *caller)
3379 {
3380 	unsigned long save_flags;
3381 	void *ptr;
3382 
3383 	if (should_failslab(cachep, flags))
3384 		return NULL;
3385 
3386 	cache_alloc_debugcheck_before(cachep, flags);
3387 	local_irq_save(save_flags);
3388 
3389 	if (unlikely(nodeid == -1))
3390 		nodeid = numa_node_id();
3391 
3392 	if (unlikely(!cachep->nodelists[nodeid])) {
3393 		/* Node not bootstrapped yet */
3394 		ptr = fallback_alloc(cachep, flags);
3395 		goto out;
3396 	}
3397 
3398 	if (nodeid == numa_node_id()) {
3399 		/*
3400 		 * Use the locally cached objects if possible.
3401 		 * However ____cache_alloc does not allow fallback
3402 		 * to other nodes. It may fail while we still have
3403 		 * objects on other nodes available.
3404 		 */
3405 		ptr = ____cache_alloc(cachep, flags);
3406 		if (ptr)
3407 			goto out;
3408 	}
3409 	/* ____cache_alloc_node can fall back to other nodes */
3410 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3411   out:
3412 	local_irq_restore(save_flags);
3413 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3414 
3415 	if (unlikely((flags & __GFP_ZERO) && ptr))
3416 		memset(ptr, 0, obj_size(cachep));
3417 
3418 	return ptr;
3419 }
3420 
3421 static __always_inline void *
3422 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3423 {
3424 	void *objp;
3425 
3426 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3427 		objp = alternate_node_alloc(cache, flags);
3428 		if (objp)
3429 			goto out;
3430 	}
3431 	objp = ____cache_alloc(cache, flags);
3432 
3433 	/*
3434 	 * We may just have run out of memory on the local node.
3435 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3436 	 */
3437 	if (!objp)
3438 		objp = ____cache_alloc_node(cache, flags, numa_node_id());
3439 
3440   out:
3441 	return objp;
3442 }
3443 #else
3444 
3445 static __always_inline void *
3446 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3447 {
3448 	return ____cache_alloc(cachep, flags);
3449 }
3450 
3451 #endif /* CONFIG_NUMA */
3452 
3453 static __always_inline void *
3454 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3455 {
3456 	unsigned long save_flags;
3457 	void *objp;
3458 
3459 	if (should_failslab(cachep, flags))
3460 		return NULL;
3461 
3462 	cache_alloc_debugcheck_before(cachep, flags);
3463 	local_irq_save(save_flags);
3464 	objp = __do_cache_alloc(cachep, flags);
3465 	local_irq_restore(save_flags);
3466 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3467 	prefetchw(objp);
3468 
3469 	if (unlikely((flags & __GFP_ZERO) && objp))
3470 		memset(objp, 0, obj_size(cachep));
3471 
3472 	return objp;
3473 }
3474 
3475 /*
3476  * The caller needs to acquire the correct kmem_list3's list_lock.
3477  */
3478 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3479 		       int node)
3480 {
3481 	int i;
3482 	struct kmem_list3 *l3;
3483 
3484 	for (i = 0; i < nr_objects; i++) {
3485 		void *objp = objpp[i];
3486 		struct slab *slabp;
3487 
3488 		slabp = virt_to_slab(objp);
3489 		l3 = cachep->nodelists[node];
3490 		list_del(&slabp->list);
3491 		check_spinlock_acquired_node(cachep, node);
3492 		check_slabp(cachep, slabp);
3493 		slab_put_obj(cachep, slabp, objp, node);
3494 		STATS_DEC_ACTIVE(cachep);
3495 		l3->free_objects++;
3496 		check_slabp(cachep, slabp);
3497 
3498 		/* fixup slab chains */
3499 		if (slabp->inuse == 0) {
3500 			if (l3->free_objects > l3->free_limit) {
3501 				l3->free_objects -= cachep->num;
3502 				/* No need to drop any previously held
3503 				 * lock here; even if we have an off-slab slab
3504 				 * descriptor, it is guaranteed to come from
3505 				 * a different cache. Refer to the comments before
3506 				 * alloc_slabmgmt.
3507 				 */
3508 				slab_destroy(cachep, slabp);
3509 			} else {
3510 				list_add(&slabp->list, &l3->slabs_free);
3511 			}
3512 		} else {
3513 			/* Unconditionally move a slab to the end of the
3514 			 * partial list on free, giving the other objects
3515 			 * the maximum time to be freed, too.
3516 			 */
3517 			list_add_tail(&slabp->list, &l3->slabs_partial);
3518 		}
3519 	}
3520 }
3521 
3522 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3523 {
3524 	int batchcount;
3525 	struct kmem_list3 *l3;
3526 	int node = numa_node_id();
3527 
3528 	batchcount = ac->batchcount;
3529 #if DEBUG
3530 	BUG_ON(!batchcount || batchcount > ac->avail);
3531 #endif
3532 	check_irq_off();
3533 	l3 = cachep->nodelists[node];
3534 	spin_lock(&l3->list_lock);
3535 	if (l3->shared) {
3536 		struct array_cache *shared_array = l3->shared;
3537 		int max = shared_array->limit - shared_array->avail;
3538 		if (max) {
3539 			if (batchcount > max)
3540 				batchcount = max;
3541 			memcpy(&(shared_array->entry[shared_array->avail]),
3542 			       ac->entry, sizeof(void *) * batchcount);
3543 			shared_array->avail += batchcount;
3544 			goto free_done;
3545 		}
3546 	}
3547 
3548 	free_block(cachep, ac->entry, batchcount, node);
3549 free_done:
3550 #if STATS
3551 	{
3552 		int i = 0;
3553 		struct list_head *p;
3554 
3555 		p = l3->slabs_free.next;
3556 		while (p != &(l3->slabs_free)) {
3557 			struct slab *slabp;
3558 
3559 			slabp = list_entry(p, struct slab, list);
3560 			BUG_ON(slabp->inuse);
3561 
3562 			i++;
3563 			p = p->next;
3564 		}
3565 		STATS_SET_FREEABLE(cachep, i);
3566 	}
3567 #endif
3568 	spin_unlock(&l3->list_lock);
3569 	ac->avail -= batchcount;
3570 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3571 }
3572 
3573 /*
3574  * Release an obj back to its cache. If the obj has a constructed state, it must
3575  * be in this state _before_ it is released.  Called with disabled ints.
3576  */
3577 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3578 {
3579 	struct array_cache *ac = cpu_cache_get(cachep);
3580 
3581 	check_irq_off();
3582 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3583 
3584 	/*
3585 	 * Skip calling cache_free_alien() when the platform is not NUMA.
3586 	 * This avoids the cache misses that happen while accessing slabp (which
3587 	 * is a per-page memory reference) to get the nodeid. Instead, use a
3588 	 * global variable to decide whether to skip the call; that variable is
3589 	 * most likely to be present in the cache.
3590 	 */
3591 	if (numa_platform && cache_free_alien(cachep, objp))
3592 		return;
3593 
3594 	if (likely(ac->avail < ac->limit)) {
3595 		STATS_INC_FREEHIT(cachep);
3596 		ac->entry[ac->avail++] = objp;
3597 		return;
3598 	} else {
3599 		STATS_INC_FREEMISS(cachep);
3600 		cache_flusharray(cachep, ac);
3601 		ac->entry[ac->avail++] = objp;
3602 	}
3603 }
3604 
3605 /**
3606  * kmem_cache_alloc - Allocate an object
3607  * @cachep: The cache to allocate from.
3608  * @flags: See kmalloc().
3609  *
3610  * Allocate an object from this cache.  The flags are only relevant
3611  * if the cache has no available objects.
3612  */
3613 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3614 {
3615 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3616 }
3617 EXPORT_SYMBOL(kmem_cache_alloc);
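
/*
 * Sketch of the usual alloc/free pairing (foo_cache and struct foo are
 * hypothetical, see the kmem_cache_create() example earlier in this file):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 *
 * GFP_ATOMIC is the usual flag when the caller cannot sleep.
 */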
3618 
3619 /**
3620  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3621  * @cachep: the cache we're checking against
3622  * @ptr: pointer to validate
3623  *
3624  * This verifies that the untrusted pointer looks sane;
3625  * it is _not_ a guarantee that the pointer is actually
3626  * part of the slab cache in question, but it at least
3627  * validates that the pointer can be dereferenced and
3628  * looks half-way sane.
3629  *
3630  * Currently only used for dentry validation.
3631  */
3632 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3633 {
3634 	unsigned long addr = (unsigned long)ptr;
3635 	unsigned long min_addr = PAGE_OFFSET;
3636 	unsigned long align_mask = BYTES_PER_WORD - 1;
3637 	unsigned long size = cachep->buffer_size;
3638 	struct page *page;
3639 
3640 	if (unlikely(addr < min_addr))
3641 		goto out;
3642 	if (unlikely(addr > (unsigned long)high_memory - size))
3643 		goto out;
3644 	if (unlikely(addr & align_mask))
3645 		goto out;
3646 	if (unlikely(!kern_addr_valid(addr)))
3647 		goto out;
3648 	if (unlikely(!kern_addr_valid(addr + size - 1)))
3649 		goto out;
3650 	page = virt_to_page(ptr);
3651 	if (unlikely(!PageSlab(page)))
3652 		goto out;
3653 	if (unlikely(page_get_cache(page) != cachep))
3654 		goto out;
3655 	return 1;
3656 out:
3657 	return 0;
3658 }
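
/*
 * Illustrative use (foo_cachep and p are hypothetical names; the dentry code
 * is the one real user mentioned above): bail out early if an untrusted
 * pointer does not even look like an object from the expected cache:
 *
 *	if (!kmem_ptr_validate(foo_cachep, p))
 *		return NULL;
 */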
3659 
3660 #ifdef CONFIG_NUMA
3661 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3662 {
3663 	return __cache_alloc_node(cachep, flags, nodeid,
3664 			__builtin_return_address(0));
3665 }
3666 EXPORT_SYMBOL(kmem_cache_alloc_node);
3667 
3668 static __always_inline void *
3669 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3670 {
3671 	struct kmem_cache *cachep;
3672 
3673 	cachep = kmem_find_general_cachep(size, flags);
3674 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3675 		return cachep;
3676 	return kmem_cache_alloc_node(cachep, flags, node);
3677 }
3678 
3679 #ifdef CONFIG_DEBUG_SLAB
3680 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3681 {
3682 	return __do_kmalloc_node(size, flags, node,
3683 			__builtin_return_address(0));
3684 }
3685 EXPORT_SYMBOL(__kmalloc_node);
3686 
3687 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3688 		int node, void *caller)
3689 {
3690 	return __do_kmalloc_node(size, flags, node, caller);
3691 }
3692 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3693 #else
3694 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3695 {
3696 	return __do_kmalloc_node(size, flags, node, NULL);
3697 }
3698 EXPORT_SYMBOL(__kmalloc_node);
3699 #endif /* CONFIG_DEBUG_SLAB */
3700 #endif /* CONFIG_NUMA */
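
/*
 * Illustrative node-local allocation (data and nid are hypothetical names):
 * a per-node structure is best allocated on the node that will use it, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, nid);
 */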
3701 
3702 /**
3703  * __do_kmalloc - allocate memory
3704  * @size: how many bytes of memory are required.
3705  * @flags: the type of memory to allocate (see kmalloc).
3706  * @caller: function caller for debug tracking of the caller
3707  * @caller: return address of the caller, used for debug tracking
3708 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3709 					  void *caller)
3710 {
3711 	struct kmem_cache *cachep;
3712 
3713 	/* If you want to save a few bytes of .text space, replace the
3714 	 * __ prefix with kmem_.
3715 	 * kmalloc will then use the uninlined functions instead of the
3716 	 * inline ones.
3717 	 */
3718 	cachep = __find_general_cachep(size, flags);
3719 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3720 		return cachep;
3721 	return __cache_alloc(cachep, flags, caller);
3722 }
3723 
3724 
3725 #ifdef CONFIG_DEBUG_SLAB
3726 void *__kmalloc(size_t size, gfp_t flags)
3727 {
3728 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3729 }
3730 EXPORT_SYMBOL(__kmalloc);
3731 
3732 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3733 {
3734 	return __do_kmalloc(size, flags, caller);
3735 }
3736 EXPORT_SYMBOL(__kmalloc_track_caller);
3737 
3738 #else
3739 void *__kmalloc(size_t size, gfp_t flags)
3740 {
3741 	return __do_kmalloc(size, flags, NULL);
3742 }
3743 EXPORT_SYMBOL(__kmalloc);
3744 #endif
3745 
3746 /**
3747  * kmem_cache_free - Deallocate an object
3748  * @cachep: The cache the allocation was from.
3749  * @objp: The previously allocated object.
3750  *
3751  * Free an object which was previously allocated from this
3752  * cache.
3753  */
3754 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3755 {
3756 	unsigned long flags;
3757 
3758 	local_irq_save(flags);
3759 	debug_check_no_locks_freed(objp, obj_size(cachep));
3760 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3761 		debug_check_no_obj_freed(objp, obj_size(cachep));
3762 	__cache_free(cachep, objp);
3763 	local_irq_restore(flags);
3764 }
3765 EXPORT_SYMBOL(kmem_cache_free);
3766 
3767 /**
3768  * kfree - free previously allocated memory
3769  * @objp: pointer returned by kmalloc.
3770  *
3771  * If @objp is NULL, no operation is performed.
3772  *
3773  * Don't free memory not originally allocated by kmalloc()
3774  * or you will run into trouble.
3775  */
3776 void kfree(const void *objp)
3777 {
3778 	struct kmem_cache *c;
3779 	unsigned long flags;
3780 
3781 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3782 		return;
3783 	local_irq_save(flags);
3784 	kfree_debugcheck(objp);
3785 	c = virt_to_cache(objp);
3786 	debug_check_no_locks_freed(objp, obj_size(c));
3787 	debug_check_no_obj_freed(objp, obj_size(c));
3788 	__cache_free(c, (void *)objp);
3789 	local_irq_restore(flags);
3790 }
3791 EXPORT_SYMBOL(kfree);
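
/*
 * Illustrative kmalloc()/kfree() pairing (buf and len are hypothetical):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * Since kfree(NULL) is a no-op, error paths need not test the pointer first.
 */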
3792 
3793 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3794 {
3795 	return obj_size(cachep);
3796 }
3797 EXPORT_SYMBOL(kmem_cache_size);
3798 
3799 const char *kmem_cache_name(struct kmem_cache *cachep)
3800 {
3801 	return cachep->name;
3802 }
3803 EXPORT_SYMBOL_GPL(kmem_cache_name);
3804 
3805 /*
3806  * This initializes kmem_list3 or resizes the shared and alien caches for all online nodes.
3807  */
3808 static int alloc_kmemlist(struct kmem_cache *cachep)
3809 {
3810 	int node;
3811 	struct kmem_list3 *l3;
3812 	struct array_cache *new_shared;
3813 	struct array_cache **new_alien = NULL;
3814 
3815 	for_each_online_node(node) {
3816 
3817 		if (use_alien_caches) {
3818 			new_alien = alloc_alien_cache(node, cachep->limit);
3819 			if (!new_alien)
3820 				goto fail;
3821 		}
3822 
3823 		new_shared = NULL;
3824 		if (cachep->shared) {
3825 			new_shared = alloc_arraycache(node,
3826 				cachep->shared*cachep->batchcount,
3827 					0xbaadf00d);
3828 			if (!new_shared) {
3829 				free_alien_cache(new_alien);
3830 				goto fail;
3831 			}
3832 		}
3833 
3834 		l3 = cachep->nodelists[node];
3835 		if (l3) {
3836 			struct array_cache *shared = l3->shared;
3837 
3838 			spin_lock_irq(&l3->list_lock);
3839 
3840 			if (shared)
3841 				free_block(cachep, shared->entry,
3842 						shared->avail, node);
3843 
3844 			l3->shared = new_shared;
3845 			if (!l3->alien) {
3846 				l3->alien = new_alien;
3847 				new_alien = NULL;
3848 			}
3849 			l3->free_limit = (1 + nr_cpus_node(node)) *
3850 					cachep->batchcount + cachep->num;
3851 			spin_unlock_irq(&l3->list_lock);
3852 			kfree(shared);
3853 			free_alien_cache(new_alien);
3854 			continue;
3855 		}
3856 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3857 		if (!l3) {
3858 			free_alien_cache(new_alien);
3859 			kfree(new_shared);
3860 			goto fail;
3861 		}
3862 
3863 		kmem_list3_init(l3);
3864 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3865 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3866 		l3->shared = new_shared;
3867 		l3->alien = new_alien;
3868 		l3->free_limit = (1 + nr_cpus_node(node)) *
3869 					cachep->batchcount + cachep->num;
3870 		cachep->nodelists[node] = l3;
3871 	}
3872 	return 0;
3873 
3874 fail:
3875 	if (!cachep->next.next) {
3876 		/* Cache is not active yet. Roll back what we did */
3877 		node--;
3878 		while (node >= 0) {
3879 			if (cachep->nodelists[node]) {
3880 				l3 = cachep->nodelists[node];
3881 
3882 				kfree(l3->shared);
3883 				free_alien_cache(l3->alien);
3884 				kfree(l3);
3885 				cachep->nodelists[node] = NULL;
3886 			}
3887 			node--;
3888 		}
3889 	}
3890 	return -ENOMEM;
3891 }
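
/*
 * Worked free_limit example with illustrative numbers: on a node with 4 cpus,
 * batchcount 30 and 60 objects per slab, free_limit becomes
 * (1 + 4) * 30 + 60 = 210, i.e. roughly one batch per cpu plus one slab's
 * worth of objects may stay cached on the node's free lists.
 */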
3892 
3893 struct ccupdate_struct {
3894 	struct kmem_cache *cachep;
3895 	struct array_cache *new[NR_CPUS];
3896 };
3897 
3898 static void do_ccupdate_local(void *info)
3899 {
3900 	struct ccupdate_struct *new = info;
3901 	struct array_cache *old;
3902 
3903 	check_irq_off();
3904 	old = cpu_cache_get(new->cachep);
3905 
3906 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3907 	new->new[smp_processor_id()] = old;
3908 }
3909 
3910 /* Always called with the cache_chain_mutex held */
3911 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3912 				int batchcount, int shared)
3913 {
3914 	struct ccupdate_struct *new;
3915 	int i;
3916 
3917 	new = kzalloc(sizeof(*new), GFP_KERNEL);
3918 	if (!new)
3919 		return -ENOMEM;
3920 
3921 	for_each_online_cpu(i) {
3922 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
3923 						batchcount);
3924 		if (!new->new[i]) {
3925 			for (i--; i >= 0; i--)
3926 				kfree(new->new[i]);
3927 			kfree(new);
3928 			return -ENOMEM;
3929 		}
3930 	}
3931 	new->cachep = cachep;
3932 
3933 	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3934 
3935 	check_irq_on();
3936 	cachep->batchcount = batchcount;
3937 	cachep->limit = limit;
3938 	cachep->shared = shared;
3939 
3940 	for_each_online_cpu(i) {
3941 		struct array_cache *ccold = new->new[i];
3942 		if (!ccold)
3943 			continue;
3944 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3945 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3946 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3947 		kfree(ccold);
3948 	}
3949 	kfree(new);
3950 	return alloc_kmemlist(cachep);
3951 }
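
/*
 * Note on the update above: on_each_cpu() runs do_ccupdate_local() with
 * interrupts off on every cpu, swapping that cpu's array_cache pointer for
 * the freshly allocated one.  The old arrays, now detached and saved back in
 * new->new[], are then drained with free_block() under the node's list_lock
 * and kfree()d without racing against concurrent allocations.
 */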
3952 
3953 /* Called with cache_chain_mutex held always */
3954 static int enable_cpucache(struct kmem_cache *cachep)
3955 {
3956 	int err;
3957 	int limit, shared;
3958 
3959 	/*
3960 	 * The head array serves three purposes:
3961 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3962 	 * - reduce the number of spinlock operations.
3963 	 * - reduce the number of linked list operations on the slab and
3964 	 *   bufctl chains: array operations are cheaper.
3965 	 * The numbers are guessed; we should auto-tune them as described by
3966 	 * Bonwick.
3967 	 */
3968 	if (cachep->buffer_size > 131072)
3969 		limit = 1;
3970 	else if (cachep->buffer_size > PAGE_SIZE)
3971 		limit = 8;
3972 	else if (cachep->buffer_size > 1024)
3973 		limit = 24;
3974 	else if (cachep->buffer_size > 256)
3975 		limit = 54;
3976 	else
3977 		limit = 120;
3978 
3979 	/*
3980 	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
3981 	 * allocation behaviour: most allocs on one cpu, most frees
3982 	 * on another. For these cases, an efficient object passing between
3983 	 * cpus is necessary. This is provided by a shared array. The array
3984 	 * replaces Bonwick's magazine layer.
3985 	 * On uniprocessor, it's functionally equivalent (but less efficient)
3986 	 * to a larger limit. Thus disabled by default.
3987 	 */
3988 	shared = 0;
3989 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
3990 		shared = 8;
3991 
3992 #if DEBUG
3993 	/*
3994 	 * With debugging enabled, a large batchcount leads to excessively long
3995 	 * periods with local interrupts disabled. Limit the batchcount.
3996 	 */
3997 	if (limit > 32)
3998 		limit = 32;
3999 #endif
4000 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
4001 	if (err)
4002 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4003 		       cachep->name, -err);
4004 	return err;
4005 }
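
/*
 * Worked example of the tuning heuristic (sizes are illustrative): a cache
 * with a buffer_size around 512 bytes gets limit = 54 and
 * batchcount = (54 + 1) / 2 = 27; on SMP, with buffer_size <= PAGE_SIZE,
 * the shared factor of 8 yields a per-node shared array of 8 * 27 = 216
 * entries (allocated in alloc_kmemlist() above).
 */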
4006 
4007 /*
4008  * Drain an array if it contains any elements, taking the l3 lock only if
4009  * necessary. Note that the l3 list_lock also protects the array_cache
4010  * if drain_array() is used on the shared array.
4011  */
4012 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4013 			 struct array_cache *ac, int force, int node)
4014 {
4015 	int tofree;
4016 
4017 	if (!ac || !ac->avail)
4018 		return;
4019 	if (ac->touched && !force) {
4020 		ac->touched = 0;
4021 	} else {
4022 		spin_lock_irq(&l3->list_lock);
4023 		if (ac->avail) {
4024 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4025 			if (tofree > ac->avail)
4026 				tofree = (ac->avail + 1) / 2;
4027 			free_block(cachep, ac->entry, tofree, node);
4028 			ac->avail -= tofree;
4029 			memmove(ac->entry, &(ac->entry[tofree]),
4030 				sizeof(void *) * ac->avail);
4031 		}
4032 		spin_unlock_irq(&l3->list_lock);
4033 	}
4034 }
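
/*
 * Worked example with illustrative numbers: with force == 0, an untouched
 * array with limit 120 and avail 30 frees tofree = (120 + 4) / 5 = 24
 * objects and memmove()s the remaining 6 to the front; had avail been only
 * 10, tofree would have been clamped to (10 + 1) / 2 = 5.
 */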
4035 
4036 /**
4037  * cache_reap - Reclaim memory from caches.
4038  * @w: work descriptor
4039  *
4040  * Called from workqueue/eventd every few seconds.
4041  * Purpose:
4042  * - clear the per-cpu caches for this CPU.
4043  * - return freeable pages to the main free memory pool.
4044  *
4045  * If we cannot acquire the cache chain mutex then just give up - we'll try
4046  * again on the next iteration.
4047  */
4048 static void cache_reap(struct work_struct *w)
4049 {
4050 	struct kmem_cache *searchp;
4051 	struct kmem_list3 *l3;
4052 	int node = numa_node_id();
4053 	struct delayed_work *work =
4054 		container_of(w, struct delayed_work, work);
4055 
4056 	if (!mutex_trylock(&cache_chain_mutex))
4057 		/* Give up. Set up the next iteration. */
4058 		goto out;
4059 
4060 	list_for_each_entry(searchp, &cache_chain, next) {
4061 		check_irq_on();
4062 
4063 		/*
4064 		 * We only take the l3 lock if absolutely necessary and we
4065 		 * have established with reasonable certainty that
4066 		 * we can do some work once the lock is obtained.
4067 		 */
4068 		l3 = searchp->nodelists[node];
4069 
4070 		reap_alien(searchp, l3);
4071 
4072 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4073 
4074 		/*
4075 		 * These are racy checks but it does not matter
4076 		 * if we skip one check or scan twice.
4077 		 */
4078 		if (time_after(l3->next_reap, jiffies))
4079 			goto next;
4080 
4081 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4082 
4083 		drain_array(searchp, l3, l3->shared, 0, node);
4084 
4085 		if (l3->free_touched)
4086 			l3->free_touched = 0;
4087 		else {
4088 			int freed;
4089 
4090 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4091 				5 * searchp->num - 1) / (5 * searchp->num));
4092 			STATS_ADD_REAPED(searchp, freed);
4093 		}
4094 next:
4095 		cond_resched();
4096 	}
4097 	check_irq_on();
4098 	mutex_unlock(&cache_chain_mutex);
4099 	next_reap_node();
4100 out:
4101 	/* Set up the next iteration */
4102 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4103 }
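
/*
 * Worked example of the reap target (illustrative numbers, matching the
 * free_limit example above): with free_limit 210 and 60 objects per slab,
 * drain_freelist() is asked for at most (210 + 5*60 - 1) / (5*60) = 1
 * completely free slab per pass, so free lists shrink gradually rather than
 * all at once.
 */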
4104 
4105 #ifdef CONFIG_SLABINFO
4106 
4107 static void print_slabinfo_header(struct seq_file *m)
4108 {
4109 	/*
4110 	 * Output format version, so at least we can change it
4111 	 * without _too_ many complaints.
4112 	 */
4113 #if STATS
4114 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4115 #else
4116 	seq_puts(m, "slabinfo - version: 2.1\n");
4117 #endif
4118 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4119 		 "<objperslab> <pagesperslab>");
4120 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4121 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4122 #if STATS
4123 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4124 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4125 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4126 #endif
4127 	seq_putc(m, '\n');
4128 }
4129 
4130 static void *s_start(struct seq_file *m, loff_t *pos)
4131 {
4132 	loff_t n = *pos;
4133 
4134 	mutex_lock(&cache_chain_mutex);
4135 	if (!n)
4136 		print_slabinfo_header(m);
4137 
4138 	return seq_list_start(&cache_chain, *pos);
4139 }
4140 
4141 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4142 {
4143 	return seq_list_next(p, &cache_chain, pos);
4144 }
4145 
4146 static void s_stop(struct seq_file *m, void *p)
4147 {
4148 	mutex_unlock(&cache_chain_mutex);
4149 }
4150 
4151 static int s_show(struct seq_file *m, void *p)
4152 {
4153 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4154 	struct slab *slabp;
4155 	unsigned long active_objs;
4156 	unsigned long num_objs;
4157 	unsigned long active_slabs = 0;
4158 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4159 	const char *name;
4160 	char *error = NULL;
4161 	int node;
4162 	struct kmem_list3 *l3;
4163 
4164 	active_objs = 0;
4165 	num_slabs = 0;
4166 	for_each_online_node(node) {
4167 		l3 = cachep->nodelists[node];
4168 		if (!l3)
4169 			continue;
4170 
4171 		check_irq_on();
4172 		spin_lock_irq(&l3->list_lock);
4173 
4174 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4175 			if (slabp->inuse != cachep->num && !error)
4176 				error = "slabs_full accounting error";
4177 			active_objs += cachep->num;
4178 			active_slabs++;
4179 		}
4180 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4181 			if (slabp->inuse == cachep->num && !error)
4182 				error = "slabs_partial inuse accounting error";
4183 			if (!slabp->inuse && !error)
4184 				error = "slabs_partial/inuse accounting error";
4185 			active_objs += slabp->inuse;
4186 			active_slabs++;
4187 		}
4188 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4189 			if (slabp->inuse && !error)
4190 				error = "slabs_free/inuse accounting error";
4191 			num_slabs++;
4192 		}
4193 		free_objects += l3->free_objects;
4194 		if (l3->shared)
4195 			shared_avail += l3->shared->avail;
4196 
4197 		spin_unlock_irq(&l3->list_lock);
4198 	}
4199 	num_slabs += active_slabs;
4200 	num_objs = num_slabs * cachep->num;
4201 	if (num_objs - active_objs != free_objects && !error)
4202 		error = "free_objects accounting error";
4203 
4204 	name = cachep->name;
4205 	if (error)
4206 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4207 
4208 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4209 		   name, active_objs, num_objs, cachep->buffer_size,
4210 		   cachep->num, (1 << cachep->gfporder));
4211 	seq_printf(m, " : tunables %4u %4u %4u",
4212 		   cachep->limit, cachep->batchcount, cachep->shared);
4213 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4214 		   active_slabs, num_slabs, shared_avail);
4215 #if STATS
4216 	{			/* list3 stats */
4217 		unsigned long high = cachep->high_mark;
4218 		unsigned long allocs = cachep->num_allocations;
4219 		unsigned long grown = cachep->grown;
4220 		unsigned long reaped = cachep->reaped;
4221 		unsigned long errors = cachep->errors;
4222 		unsigned long max_freeable = cachep->max_freeable;
4223 		unsigned long node_allocs = cachep->node_allocs;
4224 		unsigned long node_frees = cachep->node_frees;
4225 		unsigned long overflows = cachep->node_overflow;
4226 
4227 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4228 				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
4229 				reaped, errors, max_freeable, node_allocs,
4230 				node_frees, overflows);
4231 	}
4232 	/* cpu stats */
4233 	{
4234 		unsigned long allochit = atomic_read(&cachep->allochit);
4235 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4236 		unsigned long freehit = atomic_read(&cachep->freehit);
4237 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4238 
4239 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4240 			   allochit, allocmiss, freehit, freemiss);
4241 	}
4242 #endif
4243 	seq_putc(m, '\n');
4244 	return 0;
4245 }
4246 
4247 /*
4248  * slabinfo_op - iterator that generates /proc/slabinfo
4249  *
4250  * Output layout:
4251  * cache-name
4252  * num-active-objs
4253  * total-objs
4254  * object size
4255  * num-objs-per-slab
4256  * num-pages-per-slab
4257  * num-active-slabs / total-slabs (in the slabdata block)
4258  * + further values on SMP and with statistics enabled
4259  */
4260 
4261 const struct seq_operations slabinfo_op = {
4262 	.start = s_start,
4263 	.next = s_next,
4264 	.stop = s_stop,
4265 	.show = s_show,
4266 };
4267 
4268 #define MAX_SLABINFO_WRITE 128
4269 /**
4270  * slabinfo_write - Tuning for the slab allocator
4271  * @file: unused
4272  * @buffer: user buffer
4273  * @count: data length
4274  * @ppos: unused
4275  */
4276 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4277 		       size_t count, loff_t *ppos)
4278 {
4279 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4280 	int limit, batchcount, shared, res;
4281 	struct kmem_cache *cachep;
4282 
4283 	if (count > MAX_SLABINFO_WRITE)
4284 		return -EINVAL;
4285 	if (copy_from_user(&kbuf, buffer, count))
4286 		return -EFAULT;
4287 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4288 
4289 	tmp = strchr(kbuf, ' ');
4290 	if (!tmp)
4291 		return -EINVAL;
4292 	*tmp = '\0';
4293 	tmp++;
4294 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4295 		return -EINVAL;
4296 
4297 	/* Find the cache in the chain of caches. */
4298 	mutex_lock(&cache_chain_mutex);
4299 	res = -EINVAL;
4300 	list_for_each_entry(cachep, &cache_chain, next) {
4301 		if (!strcmp(cachep->name, kbuf)) {
4302 			if (limit < 1 || batchcount < 1 ||
4303 					batchcount > limit || shared < 0) {
4304 				res = 0;
4305 			} else {
4306 				res = do_tune_cpucache(cachep, limit,
4307 						       batchcount, shared);
4308 			}
4309 			break;
4310 		}
4311 	}
4312 	mutex_unlock(&cache_chain_mutex);
4313 	if (res >= 0)
4314 		res = count;
4315 	return res;
4316 }
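
/*
 * Illustrative tuning write ("dentry" is only an example cache name); the
 * three integers are <limit> <batchcount> <sharedfactor>, matching the
 * tunables column of the slabinfo header:
 *
 *	echo "dentry 256 128 8" > /proc/slabinfo
 */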
4317 
4318 #ifdef CONFIG_DEBUG_SLAB_LEAK
4319 
4320 static void *leaks_start(struct seq_file *m, loff_t *pos)
4321 {
4322 	mutex_lock(&cache_chain_mutex);
4323 	return seq_list_start(&cache_chain, *pos);
4324 }
4325 
4326 static inline int add_caller(unsigned long *n, unsigned long v)
4327 {
4328 	unsigned long *p;
4329 	int l;
4330 	if (!v)
4331 		return 1;
4332 	l = n[1];
4333 	p = n + 2;
4334 	while (l) {
4335 		int i = l/2;
4336 		unsigned long *q = p + 2 * i;
4337 		if (*q == v) {
4338 			q[1]++;
4339 			return 1;
4340 		}
4341 		if (*q > v) {
4342 			l = i;
4343 		} else {
4344 			p = q + 2;
4345 			l -= i + 1;
4346 		}
4347 	}
4348 	if (++n[1] == n[0])
4349 		return 0;
4350 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4351 	p[0] = v;
4352 	p[1] = 1;
4353 	return 1;
4354 }
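
/*
 * Layout of the table handled above: n[0] is the capacity in entries, n[1]
 * the number of entries in use, and n[2..] holds (caller address, hit count)
 * pairs kept sorted by address, which is what the binary search and the
 * memmove() insertion rely on.
 */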
4355 
4356 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4357 {
4358 	void *p;
4359 	int i;
4360 	if (n[0] == n[1])
4361 		return;
4362 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4363 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4364 			continue;
4365 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4366 			return;
4367 	}
4368 }
4369 
4370 static void show_symbol(struct seq_file *m, unsigned long address)
4371 {
4372 #ifdef CONFIG_KALLSYMS
4373 	unsigned long offset, size;
4374 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4375 
4376 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4377 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4378 		if (modname[0])
4379 			seq_printf(m, " [%s]", modname);
4380 		return;
4381 	}
4382 #endif
4383 	seq_printf(m, "%p", (void *)address);
4384 }
4385 
4386 static int leaks_show(struct seq_file *m, void *p)
4387 {
4388 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4389 	struct slab *slabp;
4390 	struct kmem_list3 *l3;
4391 	const char *name;
4392 	unsigned long *n = m->private;
4393 	int node;
4394 	int i;
4395 
4396 	if (!(cachep->flags & SLAB_STORE_USER))
4397 		return 0;
4398 	if (!(cachep->flags & SLAB_RED_ZONE))
4399 		return 0;
4400 
4401 	/* OK, we can do it */
4402 
4403 	n[1] = 0;
4404 
4405 	for_each_online_node(node) {
4406 		l3 = cachep->nodelists[node];
4407 		if (!l3)
4408 			continue;
4409 
4410 		check_irq_on();
4411 		spin_lock_irq(&l3->list_lock);
4412 
4413 		list_for_each_entry(slabp, &l3->slabs_full, list)
4414 			handle_slab(n, cachep, slabp);
4415 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4416 			handle_slab(n, cachep, slabp);
4417 		spin_unlock_irq(&l3->list_lock);
4418 	}
4419 	name = cachep->name;
4420 	if (n[0] == n[1]) {
4421 		/* Increase the buffer size */
4422 		mutex_unlock(&cache_chain_mutex);
4423 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4424 		if (!m->private) {
4425 			/* Too bad, we are really out */
4426 			m->private = n;
4427 			mutex_lock(&cache_chain_mutex);
4428 			return -ENOMEM;
4429 		}
4430 		*(unsigned long *)m->private = n[0] * 2;
4431 		kfree(n);
4432 		mutex_lock(&cache_chain_mutex);
4433 		/* Now make sure this entry will be retried */
4434 		m->count = m->size;
4435 		return 0;
4436 	}
4437 	for (i = 0; i < n[1]; i++) {
4438 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4439 		show_symbol(m, n[2*i+2]);
4440 		seq_putc(m, '\n');
4441 	}
4442 
4443 	return 0;
4444 }
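
/*
 * Note on the resize path above: when the table fills up (n[0] == n[1]) the
 * function drops cache_chain_mutex, allocates a buffer with twice the
 * previous capacity (n[0] * 2 entries) and sets m->count = m->size so that
 * seq_file retries this cache with the larger table on the next pass.
 */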
4445 
4446 const struct seq_operations slabstats_op = {
4447 	.start = leaks_start,
4448 	.next = s_next,
4449 	.stop = s_stop,
4450 	.show = leaks_show,
4451 };
4452 #endif
4453 #endif
4454 
4455 /**
4456  * ksize - get the actual amount of memory allocated for a given object
4457  * @objp: Pointer to the object
4458  *
4459  * kmalloc may internally round up allocations and return more memory
4460  * than requested. ksize() can be used to determine the actual amount of
4461  * memory allocated. The caller may use this additional memory, even though
4462  * a smaller amount of memory was initially specified with the kmalloc call.
4463  * The caller must guarantee that objp points to a valid object previously
4464  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4465  * must not be freed during the duration of the call.
4466  */
4467 size_t ksize(const void *objp)
4468 {
4469 	BUG_ON(!objp);
4470 	if (unlikely(objp == ZERO_SIZE_PTR))
4471 		return 0;
4472 
4473 	return obj_size(virt_to_cache(objp));
4474 }
4475 EXPORT_SYMBOL(ksize);
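
/*
 * Illustrative use (sizes are examples only): a kmalloc(30, GFP_KERNEL)
 * request is satisfied from a larger general cache, so ksize() on the
 * returned pointer may report e.g. 32, and the caller may legitimately use
 * all of the reported bytes.
 */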
4476