xref: /openbmc/linux/mm/slab.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
 14  * An implementation of the Slab Allocator as described in outline in:
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
 17  * or with a little more detail in:
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 24  * Each cache consists of many slabs (they are small (usually one
 25  * page long) and always contiguous), and each slab contains multiple
 26  * initialized objects.
 27  *
 28  * This means that your constructor is used only for newly allocated
 29  * slabs and you must pass objects with the same initializations to
 30  * kmem_cache_free (see the usage sketch after this comment).
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
 41  * If partial slabs exist, then new allocations come from these slabs;
 42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
 47  * Each cache has a short per-cpu head array; most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
 53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 72  *	The mutex is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
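
/*
 * Editor's usage sketch (hypothetical "foo" names, not part of this file):
 * one cache per object type; objects handed back to kmem_cache_free() must
 * still satisfy the constructor's invariants, as described above.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);	 (f must look freshly constructed)
 *	kmem_cache_destroy(foo_cachep);	 (caller must exclude concurrent allocs)
 */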
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/seq_file.h>
99 #include	<linux/notifier.h>
100 #include	<linux/kallsyms.h>
101 #include	<linux/cpu.h>
102 #include	<linux/sysctl.h>
103 #include	<linux/module.h>
104 #include	<linux/rcupdate.h>
105 #include	<linux/string.h>
106 #include	<linux/uaccess.h>
107 #include	<linux/nodemask.h>
108 #include	<linux/mempolicy.h>
109 #include	<linux/mutex.h>
110 #include	<linux/fault-inject.h>
111 #include	<linux/rtmutex.h>
112 #include	<linux/reciprocal_div.h>
113 
114 #include	<asm/cacheflush.h>
115 #include	<asm/tlbflush.h>
116 #include	<asm/page.h>
117 
118 /*
 119  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
120  *		  0 for faster, smaller code (especially in the critical paths).
121  *
122  * STATS	- 1 to collect stats for /proc/slabinfo.
123  *		  0 for faster, smaller code (especially in the critical paths).
124  *
125  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
126  */
127 
128 #ifdef CONFIG_DEBUG_SLAB
129 #define	DEBUG		1
130 #define	STATS		1
131 #define	FORCED_DEBUG	1
132 #else
133 #define	DEBUG		0
134 #define	STATS		0
135 #define	FORCED_DEBUG	0
136 #endif
137 
138 /* Shouldn't this be in a header file somewhere? */
139 #define	BYTES_PER_WORD		sizeof(void *)
140 
141 #ifndef cache_line_size
142 #define cache_line_size()	L1_CACHE_BYTES
143 #endif
144 
145 #ifndef ARCH_KMALLOC_MINALIGN
146 /*
147  * Enforce a minimum alignment for the kmalloc caches.
148  * Usually, the kmalloc caches are cache_line_size() aligned, except when
 149  * DEBUG and FORCED_DEBUG are enabled, in which case they are BYTES_PER_WORD aligned.
150  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
151  * alignment larger than the alignment of a 64-bit integer.
152  * ARCH_KMALLOC_MINALIGN allows that.
153  * Note that increasing this value may disable some debug features.
154  */
155 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
156 #endif
157 
158 #ifndef ARCH_SLAB_MINALIGN
159 /*
160  * Enforce a minimum alignment for all caches.
161  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
162  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
163  * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
164  * some debug features.
165  */
166 #define ARCH_SLAB_MINALIGN 0
167 #endif
168 
169 #ifndef ARCH_KMALLOC_FLAGS
170 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
171 #endif
172 
173 /* Legal flag mask for kmem_cache_create(). */
174 #if DEBUG
175 # define CREATE_MASK	(SLAB_RED_ZONE | \
176 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
177 			 SLAB_CACHE_DMA | \
178 			 SLAB_STORE_USER | \
179 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
180 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
181 #else
182 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
183 			 SLAB_CACHE_DMA | \
184 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
185 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
186 #endif
187 
188 /*
189  * kmem_bufctl_t:
190  *
 191  * Bufctls are used for linking objs within a slab,
 192  * using linked offsets (a free list of object indices).
193  *
194  * This implementation relies on "struct page" for locating the cache &
195  * slab an object belongs to.
196  * This allows the bufctl structure to be small (one int), but limits
197  * the number of objects a slab (not a cache) can contain when off-slab
198  * bufctls are used. The limit is the size of the largest general cache
199  * that does not use off-slab slabs.
 200  * For 32-bit archs with 4 kB pages, this is 56.
201  * This is not serious, as it is only for large objects, when it is unwise
202  * to have too many per slab.
203  * Note: This limit can be raised by introducing a general cache whose size
204  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
205  */
206 
207 typedef unsigned int kmem_bufctl_t;
208 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
209 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
210 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
211 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
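
/*
 * Editor's sketch of the resulting free list (assumes slab_bufctl(), the
 * helper defined further down in this file that returns the kmem_bufctl_t
 * array placed directly after struct slab): each entry holds the index of
 * the next free object, and the chain is terminated by BUFCTL_END:
 *
 *	kmem_bufctl_t i;
 *	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i])
 *		;	(index_to_obj(cachep, slabp, i) is a free object)
 */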
212 
213 /*
214  * struct slab
215  *
 216  * Manages the objs in a slab. Placed either at the beginning of mem allocated
 217  * for a slab, or allocated from a general cache.
 218  * Slabs are chained into three lists: fully used, partial, fully free slabs.
219  */
220 struct slab {
221 	struct list_head list;
222 	unsigned long colouroff;
223 	void *s_mem;		/* including colour offset */
224 	unsigned int inuse;	/* num of objs active in slab */
225 	kmem_bufctl_t free;
226 	unsigned short nodeid;
227 };
228 
229 /*
230  * struct slab_rcu
231  *
232  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
233  * arrange for kmem_freepages to be called via RCU.  This is useful if
234  * we need to approach a kernel structure obliquely, from its address
235  * obtained without the usual locking.  We can lock the structure to
236  * stabilize it and check it's still at the given address, only if we
237  * can be sure that the memory has not been meanwhile reused for some
238  * other kind of object (which our subsystem's lock might corrupt).
239  *
240  * rcu_read_lock before reading the address, then rcu_read_unlock after
241  * taking the spinlock within the structure expected at that address.
242  *
243  * We assume struct slab_rcu can overlay struct slab when destroying.
244  */
245 struct slab_rcu {
246 	struct rcu_head head;
247 	struct kmem_cache *cachep;
248 	void *addr;
249 };
250 
251 /*
252  * struct array_cache
253  *
254  * Purpose:
255  * - LIFO ordering, to hand out cache-warm objects from _alloc
256  * - reduce the number of linked list operations
257  * - reduce spinlock operations
258  *
259  * The limit is stored in the per-cpu structure to reduce the data cache
260  * footprint.
261  *
262  */
263 struct array_cache {
264 	unsigned int avail;
265 	unsigned int limit;
266 	unsigned int batchcount;
267 	unsigned int touched;
268 	spinlock_t lock;
269 	void *entry[0];	/*
270 			 * Must have this definition in here for the proper
271 			 * alignment of array_cache. Also simplifies accessing
272 			 * the entries.
273 			 * [0] is for gcc 2.95. It should really be [].
274 			 */
275 };
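
/*
 * Editor's sketch: entry[] is used as a LIFO stack of object pointers,
 * the idiom used throughout this file:
 *
 *	ac->entry[ac->avail++] = objp;		(free: push)
 *	objp = ac->entry[--ac->avail];		(alloc: pop the warmest object)
 */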
276 
277 /*
278  * bootstrap: The caches do not work without cpuarrays anymore, but the
279  * cpuarrays are allocated from the generic caches...
280  */
281 #define BOOT_CPUCACHE_ENTRIES	1
282 struct arraycache_init {
283 	struct array_cache cache;
284 	void *entries[BOOT_CPUCACHE_ENTRIES];
285 };
286 
287 /*
288  * The slab lists for all objects.
289  */
290 struct kmem_list3 {
291 	struct list_head slabs_partial;	/* partial list first, better asm code */
292 	struct list_head slabs_full;
293 	struct list_head slabs_free;
294 	unsigned long free_objects;
295 	unsigned int free_limit;
296 	unsigned int colour_next;	/* Per-node cache coloring */
297 	spinlock_t list_lock;
298 	struct array_cache *shared;	/* shared per node */
299 	struct array_cache **alien;	/* on other nodes */
300 	unsigned long next_reap;	/* updated without locking */
301 	int free_touched;		/* updated without locking */
302 };
303 
304 /*
305  * Need this for bootstrapping a per node allocator.
306  */
307 #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
308 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
309 #define	CACHE_CACHE 0
310 #define	SIZE_AC 1
311 #define	SIZE_L3 (1 + MAX_NUMNODES)
312 
313 static int drain_freelist(struct kmem_cache *cache,
314 			struct kmem_list3 *l3, int tofree);
315 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
316 			int node);
317 static int enable_cpucache(struct kmem_cache *cachep);
318 static void cache_reap(struct work_struct *unused);
319 
320 /*
321  * This function must be completely optimized away if a constant is passed to
322  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
323  */
324 static __always_inline int index_of(const size_t size)
325 {
326 	extern void __bad_size(void);
327 
328 	if (__builtin_constant_p(size)) {
329 		int i = 0;
330 
331 #define CACHE(x) \
 332 	if (size <= x) \
333 		return i; \
334 	else \
335 		i++;
336 #include "linux/kmalloc_sizes.h"
337 #undef CACHE
338 		__bad_size();
339 	} else
340 		__bad_size();
341 	return 0;
342 }
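
/*
 * Editor's note: with a constant size the cascade above folds to a single
 * compile-time constant; e.g. if the first kmalloc_sizes.h entries are
 * 32, 64, 96, ..., then index_of(40) evaluates to 1.  A non-constant or
 * over-large size leaves a reference to the deliberately undefined
 * __bad_size(), so the mistake is caught as a link error.
 */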
343 
344 static int slab_early_init = 1;
345 
346 #define INDEX_AC index_of(sizeof(struct arraycache_init))
347 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
348 
349 static void kmem_list3_init(struct kmem_list3 *parent)
350 {
351 	INIT_LIST_HEAD(&parent->slabs_full);
352 	INIT_LIST_HEAD(&parent->slabs_partial);
353 	INIT_LIST_HEAD(&parent->slabs_free);
354 	parent->shared = NULL;
355 	parent->alien = NULL;
356 	parent->colour_next = 0;
357 	spin_lock_init(&parent->list_lock);
358 	parent->free_objects = 0;
359 	parent->free_touched = 0;
360 }
361 
362 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
363 	do {								\
364 		INIT_LIST_HEAD(listp);					\
365 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
366 	} while (0)
367 
368 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
369 	do {								\
370 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
371 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
372 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
373 	} while (0)
374 
375 /*
376  * struct kmem_cache
377  *
378  * manages a cache.
379  */
380 
381 struct kmem_cache {
382 /* 1) per-cpu data, touched during every alloc/free */
383 	struct array_cache *array[NR_CPUS];
384 /* 2) Cache tunables. Protected by cache_chain_mutex */
385 	unsigned int batchcount;
386 	unsigned int limit;
387 	unsigned int shared;
388 
389 	unsigned int buffer_size;
390 	u32 reciprocal_buffer_size;
391 /* 3) touched by every alloc & free from the backend */
392 
393 	unsigned int flags;		/* constant flags */
394 	unsigned int num;		/* # of objs per slab */
395 
396 /* 4) cache_grow/shrink */
397 	/* order of pgs per slab (2^n) */
398 	unsigned int gfporder;
399 
400 	/* force GFP flags, e.g. GFP_DMA */
401 	gfp_t gfpflags;
402 
403 	size_t colour;			/* cache colouring range */
404 	unsigned int colour_off;	/* colour offset */
405 	struct kmem_cache *slabp_cache;
406 	unsigned int slab_size;
407 	unsigned int dflags;		/* dynamic flags */
408 
409 	/* constructor func */
410 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
411 
412 	/* de-constructor func */
413 	void (*dtor) (void *, struct kmem_cache *, unsigned long);
414 
415 /* 5) cache creation/removal */
416 	const char *name;
417 	struct list_head next;
418 
419 /* 6) statistics */
420 #if STATS
421 	unsigned long num_active;
422 	unsigned long num_allocations;
423 	unsigned long high_mark;
424 	unsigned long grown;
425 	unsigned long reaped;
426 	unsigned long errors;
427 	unsigned long max_freeable;
428 	unsigned long node_allocs;
429 	unsigned long node_frees;
430 	unsigned long node_overflow;
431 	atomic_t allochit;
432 	atomic_t allocmiss;
433 	atomic_t freehit;
434 	atomic_t freemiss;
435 #endif
436 #if DEBUG
437 	/*
438 	 * If debugging is enabled, then the allocator can add additional
439 	 * fields and/or padding to every object. buffer_size contains the total
440 	 * object size including these internal fields, the following two
441 	 * variables contain the offset to the user object and its size.
442 	 */
443 	int obj_offset;
444 	int obj_size;
445 #endif
446 	/*
447 	 * We put nodelists[] at the end of kmem_cache, because we want to size
448 	 * this array to nr_node_ids slots instead of MAX_NUMNODES
449 	 * (see kmem_cache_init())
450 	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
451 	 * is statically defined, so we reserve the max number of nodes.
452 	 */
453 	struct kmem_list3 *nodelists[MAX_NUMNODES];
454 	/*
455 	 * Do not add fields after nodelists[]
456 	 */
457 };
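
/*
 * Editor's note: kmem_cache_init() later sizes cache_cache.buffer_size as
 *
 *	offsetof(struct kmem_cache, nodelists) +
 *		nr_node_ids * sizeof(struct kmem_list3 *)
 *
 * which is why no field may be added after nodelists[] above.
 */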
458 
459 #define CFLGS_OFF_SLAB		(0x80000000UL)
460 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
461 
462 #define BATCHREFILL_LIMIT	16
463 /*
 464  * Optimization question: fewer reaps mean a lower probability of unnecessary
465  * cpucache drain/refill cycles.
466  *
467  * OTOH the cpuarrays can contain lots of objects,
468  * which could lock up otherwise freeable slabs.
469  */
470 #define REAPTIMEOUT_CPUC	(2*HZ)
471 #define REAPTIMEOUT_LIST3	(4*HZ)
472 
473 #if STATS
474 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
475 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
476 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
477 #define	STATS_INC_GROWN(x)	((x)->grown++)
478 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
479 #define	STATS_SET_HIGH(x)						\
480 	do {								\
481 		if ((x)->num_active > (x)->high_mark)			\
482 			(x)->high_mark = (x)->num_active;		\
483 	} while (0)
484 #define	STATS_INC_ERR(x)	((x)->errors++)
485 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
486 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
487 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
488 #define	STATS_SET_FREEABLE(x, i)					\
489 	do {								\
490 		if ((x)->max_freeable < i)				\
491 			(x)->max_freeable = i;				\
492 	} while (0)
493 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
494 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
495 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
496 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
497 #else
498 #define	STATS_INC_ACTIVE(x)	do { } while (0)
499 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
500 #define	STATS_INC_ALLOCED(x)	do { } while (0)
501 #define	STATS_INC_GROWN(x)	do { } while (0)
502 #define	STATS_ADD_REAPED(x,y)	do { } while (0)
503 #define	STATS_SET_HIGH(x)	do { } while (0)
504 #define	STATS_INC_ERR(x)	do { } while (0)
505 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
506 #define	STATS_INC_NODEFREES(x)	do { } while (0)
507 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
508 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
509 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
510 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
511 #define STATS_INC_FREEHIT(x)	do { } while (0)
512 #define STATS_INC_FREEMISS(x)	do { } while (0)
513 #endif
514 
515 #if DEBUG
516 
517 /*
518  * memory layout of objects:
519  * 0		: objp
520  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
521  * 		the end of an object is aligned with the end of the real
522  * 		allocation. Catches writes behind the end of the allocation.
523  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
524  * 		redzone word.
525  * cachep->obj_offset: The real object.
526  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
527  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
528  *					[BYTES_PER_WORD long]
529  */
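
/*
 * Editor's sketch of that layout for a cache with SLAB_RED_ZONE and
 * SLAB_STORE_USER (widths not to scale):
 *
 *	[pad][RZ1][   user object   ][RZ2][last caller]
 *	          ^obj_offset             ^buffer_size - BYTES_PER_WORD
 *
 * dbg_redzone1() below reads the redzone word just before the object,
 * dbg_redzone2() the one just after it, and dbg_userword() the last word.
 */
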
530 static int obj_offset(struct kmem_cache *cachep)
531 {
532 	return cachep->obj_offset;
533 }
534 
535 static int obj_size(struct kmem_cache *cachep)
536 {
537 	return cachep->obj_size;
538 }
539 
540 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
541 {
542 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
543 	return (unsigned long long*) (objp + obj_offset(cachep) -
544 				      sizeof(unsigned long long));
545 }
546 
547 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
548 {
549 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
550 	if (cachep->flags & SLAB_STORE_USER)
551 		return (unsigned long long *)(objp + cachep->buffer_size -
552 					      sizeof(unsigned long long) -
553 					      BYTES_PER_WORD);
554 	return (unsigned long long *) (objp + cachep->buffer_size -
555 				       sizeof(unsigned long long));
556 }
557 
558 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
559 {
560 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
561 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
562 }
563 
564 #else
565 
566 #define obj_offset(x)			0
567 #define obj_size(cachep)		(cachep->buffer_size)
568 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
569 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
570 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
571 
572 #endif
573 
574 /*
575  * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
576  * order.
577  */
578 #if defined(CONFIG_LARGE_ALLOCS)
579 #define	MAX_OBJ_ORDER	13	/* up to 32Mb */
580 #define	MAX_GFP_ORDER	13	/* up to 32Mb */
581 #elif defined(CONFIG_MMU)
582 #define	MAX_OBJ_ORDER	5	/* 32 pages */
583 #define	MAX_GFP_ORDER	5	/* 32 pages */
584 #else
585 #define	MAX_OBJ_ORDER	8	/* up to 1Mb */
586 #define	MAX_GFP_ORDER	8	/* up to 1Mb */
587 #endif
588 
589 /*
590  * Do not go above this order unless 0 objects fit into the slab.
591  */
592 #define	BREAK_GFP_ORDER_HI	1
593 #define	BREAK_GFP_ORDER_LO	0
594 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
595 
596 /*
 597  * Functions for storing/retrieving the cachep and/or slab from the page
 598  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 599  * these are used to find the cache to which an obj belongs.
600  */
601 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
602 {
603 	page->lru.next = (struct list_head *)cache;
604 }
605 
606 static inline struct kmem_cache *page_get_cache(struct page *page)
607 {
608 	page = compound_head(page);
609 	BUG_ON(!PageSlab(page));
610 	return (struct kmem_cache *)page->lru.next;
611 }
612 
613 static inline void page_set_slab(struct page *page, struct slab *slab)
614 {
615 	page->lru.prev = (struct list_head *)slab;
616 }
617 
618 static inline struct slab *page_get_slab(struct page *page)
619 {
620 	BUG_ON(!PageSlab(page));
621 	return (struct slab *)page->lru.prev;
622 }
623 
624 static inline struct kmem_cache *virt_to_cache(const void *obj)
625 {
626 	struct page *page = virt_to_head_page(obj);
627 	return page_get_cache(page);
628 }
629 
630 static inline struct slab *virt_to_slab(const void *obj)
631 {
632 	struct page *page = virt_to_head_page(obj);
633 	return page_get_slab(page);
634 }
635 
636 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
637 				 unsigned int idx)
638 {
639 	return slab->s_mem + cache->buffer_size * idx;
640 }
641 
642 /*
643  * We want to avoid an expensive divide : (offset / cache->buffer_size)
644  *   Using the fact that buffer_size is a constant for a particular cache,
645  *   we can replace (offset / cache->buffer_size) by
646  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
647  */
648 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
649 					const struct slab *slab, void *obj)
650 {
651 	u32 offset = (obj - slab->s_mem);
652 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
653 }
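
/*
 * Editor's worked example: with buffer_size == 256,
 * reciprocal_buffer_size holds roughly 2^32 / 256, so an object at
 * offset 768 from s_mem gives
 *
 *	reciprocal_divide(768, cache->reciprocal_buffer_size)
 *		== (u32)(((u64)768 * (2^32 / 256)) >> 32) == 3
 *
 * the same result as 768 / 256, with no divide instruction.
 */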
654 
655 /*
656  * These are the default caches for kmalloc. Custom caches can have other sizes.
657  */
658 struct cache_sizes malloc_sizes[] = {
659 #define CACHE(x) { .cs_size = (x) },
660 #include <linux/kmalloc_sizes.h>
661 	CACHE(ULONG_MAX)
662 #undef CACHE
663 };
664 EXPORT_SYMBOL(malloc_sizes);
665 
666 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
667 struct cache_names {
668 	char *name;
669 	char *name_dma;
670 };
671 
672 static struct cache_names __initdata cache_names[] = {
673 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
674 #include <linux/kmalloc_sizes.h>
675 	{NULL,}
676 #undef CACHE
677 };
678 
679 static struct arraycache_init initarray_cache __initdata =
680     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
681 static struct arraycache_init initarray_generic =
682     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
683 
684 /* internal cache of cache description objs */
685 static struct kmem_cache cache_cache = {
686 	.batchcount = 1,
687 	.limit = BOOT_CPUCACHE_ENTRIES,
688 	.shared = 1,
689 	.buffer_size = sizeof(struct kmem_cache),
690 	.name = "kmem_cache",
691 };
692 
693 #define BAD_ALIEN_MAGIC 0x01020304ul
694 
695 #ifdef CONFIG_LOCKDEP
696 
697 /*
698  * Slab sometimes uses the kmalloc slabs to store the slab headers
699  * for other slabs "off slab".
700  * The locking for this is tricky in that it nests within the locks
701  * of all other slabs in a few places; to deal with this special
702  * locking we put on-slab caches into a separate lock-class.
703  *
704  * We set lock class for alien array caches which are up during init.
 705  * The lock annotation will be lost if all cpus of a node go down and
 706  * then come back up during hotplug.
707  */
708 static struct lock_class_key on_slab_l3_key;
709 static struct lock_class_key on_slab_alc_key;
710 
711 static inline void init_lock_keys(void)
712 
713 {
714 	int q;
715 	struct cache_sizes *s = malloc_sizes;
716 
717 	while (s->cs_size != ULONG_MAX) {
718 		for_each_node(q) {
719 			struct array_cache **alc;
720 			int r;
721 			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
722 			if (!l3 || OFF_SLAB(s->cs_cachep))
723 				continue;
724 			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
725 			alc = l3->alien;
726 			/*
727 			 * FIXME: This check for BAD_ALIEN_MAGIC
728 			 * should go away when common slab code is taught to
729 			 * work even without alien caches.
730 			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
731 			 * for alloc_alien_cache,
732 			 */
733 			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
734 				continue;
735 			for_each_node(r) {
736 				if (alc[r])
737 					lockdep_set_class(&alc[r]->lock,
738 					     &on_slab_alc_key);
739 			}
740 		}
741 		s++;
742 	}
743 }
744 #else
745 static inline void init_lock_keys(void)
746 {
747 }
748 #endif
749 
750 /*
751  * 1. Guard access to the cache-chain.
752  * 2. Protect sanity of cpu_online_map against cpu hotplug events
753  */
754 static DEFINE_MUTEX(cache_chain_mutex);
755 static struct list_head cache_chain;
756 
757 /*
758  * chicken and egg problem: delay the per-cpu array allocation
759  * until the general caches are up.
760  */
761 static enum {
762 	NONE,
763 	PARTIAL_AC,
764 	PARTIAL_L3,
765 	FULL
766 } g_cpucache_up;
767 
768 /*
769  * used by boot code to determine if it can use slab based allocator
770  */
771 int slab_is_available(void)
772 {
773 	return g_cpucache_up == FULL;
774 }
775 
776 static DEFINE_PER_CPU(struct delayed_work, reap_work);
777 
778 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
779 {
780 	return cachep->array[smp_processor_id()];
781 }
782 
783 static inline struct kmem_cache *__find_general_cachep(size_t size,
784 							gfp_t gfpflags)
785 {
786 	struct cache_sizes *csizep = malloc_sizes;
787 
788 #if DEBUG
789 	/* This happens if someone tries to call
790 	 * kmem_cache_create(), or __kmalloc(), before
791 	 * the generic caches are initialized.
792 	 */
793 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
794 #endif
795 	while (size > csizep->cs_size)
796 		csizep++;
797 
798 	/*
799 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
800 	 * has cs_{dma,}cachep==NULL. Thus no special case
801 	 * for large kmalloc calls required.
802 	 */
803 #ifdef CONFIG_ZONE_DMA
804 	if (unlikely(gfpflags & GFP_DMA))
805 		return csizep->cs_dmacachep;
806 #endif
807 	return csizep->cs_cachep;
808 }
809 
810 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
811 {
812 	return __find_general_cachep(size, gfpflags);
813 }
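
/*
 * Editor's sketch: a kmalloc(100, GFP_KERNEL) ends up walking malloc_sizes[]
 * here until cs_size >= 100; with the usual 32/64/96/128/... progression it
 * is served from the "size-128" cache.  The same request with GFP_DMA is
 * served from "size-128(DMA)" instead when CONFIG_ZONE_DMA is enabled.
 */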
814 
815 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
816 {
817 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
818 }
819 
820 /*
821  * Calculate the number of objects and left-over bytes for a given buffer size.
822  */
823 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
824 			   size_t align, int flags, size_t *left_over,
825 			   unsigned int *num)
826 {
827 	int nr_objs;
828 	size_t mgmt_size;
829 	size_t slab_size = PAGE_SIZE << gfporder;
830 
831 	/*
832 	 * The slab management structure can be either off the slab or
833 	 * on it. For the latter case, the memory allocated for a
834 	 * slab is used for:
835 	 *
836 	 * - The struct slab
837 	 * - One kmem_bufctl_t for each object
838 	 * - Padding to respect alignment of @align
839 	 * - @buffer_size bytes for each object
840 	 *
841 	 * If the slab management structure is off the slab, then the
842 	 * alignment will already be calculated into the size. Because
843 	 * the slabs are all pages aligned, the objects will be at the
844 	 * correct alignment when allocated.
845 	 */
846 	if (flags & CFLGS_OFF_SLAB) {
847 		mgmt_size = 0;
848 		nr_objs = slab_size / buffer_size;
849 
850 		if (nr_objs > SLAB_LIMIT)
851 			nr_objs = SLAB_LIMIT;
852 	} else {
853 		/*
854 		 * Ignore padding for the initial guess. The padding
855 		 * is at most @align-1 bytes, and @buffer_size is at
856 		 * least @align. In the worst case, this result will
857 		 * be one greater than the number of objects that fit
858 		 * into the memory allocation when taking the padding
859 		 * into account.
860 		 */
861 		nr_objs = (slab_size - sizeof(struct slab)) /
862 			  (buffer_size + sizeof(kmem_bufctl_t));
863 
864 		/*
865 		 * This calculated number will be either the right
866 		 * amount, or one greater than what we want.
867 		 */
868 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
869 		       > slab_size)
870 			nr_objs--;
871 
872 		if (nr_objs > SLAB_LIMIT)
873 			nr_objs = SLAB_LIMIT;
874 
875 		mgmt_size = slab_mgmt_size(nr_objs, align);
876 	}
877 	*num = nr_objs;
878 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
879 }
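
/*
 * Editor's worked example for the on-slab case, assuming a 4096-byte
 * order-0 slab, buffer_size == 256, align == 32, and (on a hypothetical
 * 32-bit box) sizeof(struct slab) == 28, sizeof(kmem_bufctl_t) == 4:
 *
 *	nr_objs   = (4096 - 28) / (256 + 4) = 15
 *	mgmt_size = ALIGN(28 + 15 * 4, 32)  = 96
 *	96 + 15 * 256 = 3936 <= 4096, so all 15 objects fit
 *	*left_over = 4096 - 3840 - 96 = 160	(available for colouring)
 */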
880 
881 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
882 
883 static void __slab_error(const char *function, struct kmem_cache *cachep,
884 			char *msg)
885 {
886 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
887 	       function, cachep->name, msg);
888 	dump_stack();
889 }
890 
891 /*
892  * By default on NUMA we use alien caches to stage the freeing of
893  * objects allocated from other nodes. This causes massive memory
 894  * inefficiencies when using a fake NUMA setup to split memory into a
 895  * large number of small nodes, so it can be disabled on the command
 896  * line.
 897  */
898 
899 static int use_alien_caches __read_mostly = 1;
900 static int __init noaliencache_setup(char *s)
901 {
902 	use_alien_caches = 0;
903 	return 1;
904 }
905 __setup("noaliencache", noaliencache_setup);
906 
907 #ifdef CONFIG_NUMA
908 /*
909  * Special reaping functions for NUMA systems called from cache_reap().
910  * These take care of doing round robin flushing of alien caches (containing
911  * objects freed on different nodes from which they were allocated) and the
912  * flushing of remote pcps by calling drain_node_pages.
913  */
914 static DEFINE_PER_CPU(unsigned long, reap_node);
915 
916 static void init_reap_node(int cpu)
917 {
918 	int node;
919 
920 	node = next_node(cpu_to_node(cpu), node_online_map);
921 	if (node == MAX_NUMNODES)
922 		node = first_node(node_online_map);
923 
924 	per_cpu(reap_node, cpu) = node;
925 }
926 
927 static void next_reap_node(void)
928 {
929 	int node = __get_cpu_var(reap_node);
930 
931 	node = next_node(node, node_online_map);
932 	if (unlikely(node >= MAX_NUMNODES))
933 		node = first_node(node_online_map);
934 	__get_cpu_var(reap_node) = node;
935 }
936 
937 #else
938 #define init_reap_node(cpu) do { } while (0)
939 #define next_reap_node(void) do { } while (0)
940 #endif
941 
942 /*
943  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
944  * via the workqueue/eventd.
945  * Add the CPU number into the expiration time to minimize the possibility of
946  * the CPUs getting into lockstep and contending for the global cache chain
947  * lock.
948  */
949 static void __devinit start_cpu_timer(int cpu)
950 {
951 	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
952 
953 	/*
954 	 * When this gets called from do_initcalls via cpucache_init(),
955 	 * init_workqueues() has already run, so keventd will be setup
956 	 * at that time.
957 	 */
958 	if (keventd_up() && reap_work->work.func == NULL) {
959 		init_reap_node(cpu);
960 		INIT_DELAYED_WORK(reap_work, cache_reap);
961 		schedule_delayed_work_on(cpu, reap_work,
962 					__round_jiffies_relative(HZ, cpu));
963 	}
964 }
965 
966 static struct array_cache *alloc_arraycache(int node, int entries,
967 					    int batchcount)
968 {
969 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
970 	struct array_cache *nc = NULL;
971 
972 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
973 	if (nc) {
974 		nc->avail = 0;
975 		nc->limit = entries;
976 		nc->batchcount = batchcount;
977 		nc->touched = 0;
978 		spin_lock_init(&nc->lock);
979 	}
980 	return nc;
981 }
982 
983 /*
984  * Transfer objects in one arraycache to another.
985  * Locking must be handled by the caller.
986  *
987  * Return the number of entries transferred.
988  */
989 static int transfer_objects(struct array_cache *to,
990 		struct array_cache *from, unsigned int max)
991 {
992 	/* Figure out how many entries to transfer */
993 	int nr = min(min(from->avail, max), to->limit - to->avail);
994 
995 	if (!nr)
996 		return 0;
997 
 998 	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
 999 			sizeof(void *) * nr);
1000 
1001 	from->avail -= nr;
1002 	to->avail += nr;
1003 	to->touched = 1;
1004 	return nr;
1005 }
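
/*
 * Editor's note: __drain_alien_cache() below uses this to move up to
 * ac->limit objects into the remote node's shared array with a single
 * memcpy, e.g. transfer_objects(rl3->shared, ac, ac->limit), instead of
 * pushing them through the free lists one at a time.
 */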
1006 
1007 #ifndef CONFIG_NUMA
1008 
1009 #define drain_alien_cache(cachep, alien) do { } while (0)
1010 #define reap_alien(cachep, l3) do { } while (0)
1011 
1012 static inline struct array_cache **alloc_alien_cache(int node, int limit)
1013 {
1014 	return (struct array_cache **)BAD_ALIEN_MAGIC;
1015 }
1016 
1017 static inline void free_alien_cache(struct array_cache **ac_ptr)
1018 {
1019 }
1020 
1021 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1022 {
1023 	return 0;
1024 }
1025 
1026 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1027 		gfp_t flags)
1028 {
1029 	return NULL;
1030 }
1031 
1032 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1033 		 gfp_t flags, int nodeid)
1034 {
1035 	return NULL;
1036 }
1037 
1038 #else	/* CONFIG_NUMA */
1039 
1040 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1041 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1042 
1043 static struct array_cache **alloc_alien_cache(int node, int limit)
1044 {
1045 	struct array_cache **ac_ptr;
1046 	int memsize = sizeof(void *) * nr_node_ids;
1047 	int i;
1048 
1049 	if (limit > 1)
1050 		limit = 12;
1051 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1052 	if (ac_ptr) {
1053 		for_each_node(i) {
1054 			if (i == node || !node_online(i)) {
1055 				ac_ptr[i] = NULL;
1056 				continue;
1057 			}
1058 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1059 			if (!ac_ptr[i]) {
 1060 				for (i--; i >= 0; i--)
1061 					kfree(ac_ptr[i]);
1062 				kfree(ac_ptr);
1063 				return NULL;
1064 			}
1065 		}
1066 	}
1067 	return ac_ptr;
1068 }
1069 
1070 static void free_alien_cache(struct array_cache **ac_ptr)
1071 {
1072 	int i;
1073 
1074 	if (!ac_ptr)
1075 		return;
1076 	for_each_node(i)
1077 	    kfree(ac_ptr[i]);
1078 	kfree(ac_ptr);
1079 }
1080 
1081 static void __drain_alien_cache(struct kmem_cache *cachep,
1082 				struct array_cache *ac, int node)
1083 {
1084 	struct kmem_list3 *rl3 = cachep->nodelists[node];
1085 
1086 	if (ac->avail) {
1087 		spin_lock(&rl3->list_lock);
1088 		/*
1089 		 * Stuff objects into the remote nodes shared array first.
1090 		 * That way we could avoid the overhead of putting the objects
1091 		 * into the free lists and getting them back later.
1092 		 */
1093 		if (rl3->shared)
1094 			transfer_objects(rl3->shared, ac, ac->limit);
1095 
1096 		free_block(cachep, ac->entry, ac->avail, node);
1097 		ac->avail = 0;
1098 		spin_unlock(&rl3->list_lock);
1099 	}
1100 }
1101 
1102 /*
1103  * Called from cache_reap() to regularly drain alien caches round robin.
1104  */
1105 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1106 {
1107 	int node = __get_cpu_var(reap_node);
1108 
1109 	if (l3->alien) {
1110 		struct array_cache *ac = l3->alien[node];
1111 
1112 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1113 			__drain_alien_cache(cachep, ac, node);
1114 			spin_unlock_irq(&ac->lock);
1115 		}
1116 	}
1117 }
1118 
1119 static void drain_alien_cache(struct kmem_cache *cachep,
1120 				struct array_cache **alien)
1121 {
1122 	int i = 0;
1123 	struct array_cache *ac;
1124 	unsigned long flags;
1125 
1126 	for_each_online_node(i) {
1127 		ac = alien[i];
1128 		if (ac) {
1129 			spin_lock_irqsave(&ac->lock, flags);
1130 			__drain_alien_cache(cachep, ac, i);
1131 			spin_unlock_irqrestore(&ac->lock, flags);
1132 		}
1133 	}
1134 }
1135 
1136 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1137 {
1138 	struct slab *slabp = virt_to_slab(objp);
1139 	int nodeid = slabp->nodeid;
1140 	struct kmem_list3 *l3;
1141 	struct array_cache *alien = NULL;
1142 	int node;
1143 
1144 	node = numa_node_id();
1145 
1146 	/*
 1147 	 * Make sure we are not freeing an object from another node to the array
1148 	 * cache on this cpu.
1149 	 */
1150 	if (likely(slabp->nodeid == node))
1151 		return 0;
1152 
1153 	l3 = cachep->nodelists[node];
1154 	STATS_INC_NODEFREES(cachep);
1155 	if (l3->alien && l3->alien[nodeid]) {
1156 		alien = l3->alien[nodeid];
1157 		spin_lock(&alien->lock);
1158 		if (unlikely(alien->avail == alien->limit)) {
1159 			STATS_INC_ACOVERFLOW(cachep);
1160 			__drain_alien_cache(cachep, alien, nodeid);
1161 		}
1162 		alien->entry[alien->avail++] = objp;
1163 		spin_unlock(&alien->lock);
1164 	} else {
1165 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1166 		free_block(cachep, &objp, 1, nodeid);
1167 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1168 	}
1169 	return 1;
1170 }
1171 #endif
1172 
1173 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1174 				    unsigned long action, void *hcpu)
1175 {
1176 	long cpu = (long)hcpu;
1177 	struct kmem_cache *cachep;
1178 	struct kmem_list3 *l3 = NULL;
1179 	int node = cpu_to_node(cpu);
1180 	int memsize = sizeof(struct kmem_list3);
1181 
1182 	switch (action) {
1183 	case CPU_LOCK_ACQUIRE:
1184 		mutex_lock(&cache_chain_mutex);
1185 		break;
1186 	case CPU_UP_PREPARE:
1187 	case CPU_UP_PREPARE_FROZEN:
1188 		/*
 1189 		 * We need to do this right at the beginning since
 1190 		 * the alloc_arraycache() calls are going to use this list.
 1191 		 * kmalloc_node allows us to add the slab to the right
 1192 		 * kmem_list3 and not this cpu's kmem_list3.
1193 		 */
1194 
1195 		list_for_each_entry(cachep, &cache_chain, next) {
1196 			/*
1197 			 * Set up the size64 kmemlist for cpu before we can
1198 			 * begin anything. Make sure some other cpu on this
1199 			 * node has not already allocated this
1200 			 */
1201 			if (!cachep->nodelists[node]) {
1202 				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1203 				if (!l3)
1204 					goto bad;
1205 				kmem_list3_init(l3);
1206 				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1207 				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1208 
1209 				/*
1210 				 * The l3s don't come and go as CPUs come and
1211 				 * go.  cache_chain_mutex is sufficient
1212 				 * protection here.
1213 				 */
1214 				cachep->nodelists[node] = l3;
1215 			}
1216 
1217 			spin_lock_irq(&cachep->nodelists[node]->list_lock);
1218 			cachep->nodelists[node]->free_limit =
1219 				(1 + nr_cpus_node(node)) *
1220 				cachep->batchcount + cachep->num;
1221 			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1222 		}
1223 
1224 		/*
1225 		 * Now we can go ahead with allocating the shared arrays and
1226 		 * array caches
1227 		 */
1228 		list_for_each_entry(cachep, &cache_chain, next) {
1229 			struct array_cache *nc;
1230 			struct array_cache *shared = NULL;
1231 			struct array_cache **alien = NULL;
1232 
1233 			nc = alloc_arraycache(node, cachep->limit,
1234 						cachep->batchcount);
1235 			if (!nc)
1236 				goto bad;
1237 			if (cachep->shared) {
1238 				shared = alloc_arraycache(node,
1239 					cachep->shared * cachep->batchcount,
1240 					0xbaadf00d);
1241 				if (!shared)
1242 					goto bad;
1243 			}
1244 			if (use_alien_caches) {
 1245 				alien = alloc_alien_cache(node, cachep->limit);
 1246 				if (!alien)
 1247 					goto bad;
 1248 			}
1249 			cachep->array[cpu] = nc;
1250 			l3 = cachep->nodelists[node];
1251 			BUG_ON(!l3);
1252 
1253 			spin_lock_irq(&l3->list_lock);
1254 			if (!l3->shared) {
1255 				/*
1256 				 * We are serialised from CPU_DEAD or
1257 				 * CPU_UP_CANCELLED by the cpucontrol lock
1258 				 */
1259 				l3->shared = shared;
1260 				shared = NULL;
1261 			}
1262 #ifdef CONFIG_NUMA
1263 			if (!l3->alien) {
1264 				l3->alien = alien;
1265 				alien = NULL;
1266 			}
1267 #endif
1268 			spin_unlock_irq(&l3->list_lock);
1269 			kfree(shared);
1270 			free_alien_cache(alien);
1271 		}
1272 		break;
1273 	case CPU_ONLINE:
1274 	case CPU_ONLINE_FROZEN:
1275 		start_cpu_timer(cpu);
1276 		break;
1277 #ifdef CONFIG_HOTPLUG_CPU
1278   	case CPU_DOWN_PREPARE:
1279   	case CPU_DOWN_PREPARE_FROZEN:
1280 		/*
1281 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1282 		 * held so that if cache_reap() is invoked it cannot do
1283 		 * anything expensive but will only modify reap_work
1284 		 * and reschedule the timer.
 1285 		 */
1286 		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1287 		/* Now the cache_reaper is guaranteed to be not running. */
1288 		per_cpu(reap_work, cpu).work.func = NULL;
1289   		break;
1290   	case CPU_DOWN_FAILED:
1291   	case CPU_DOWN_FAILED_FROZEN:
1292 		start_cpu_timer(cpu);
1293   		break;
1294 	case CPU_DEAD:
1295 	case CPU_DEAD_FROZEN:
1296 		/*
1297 		 * Even if all the cpus of a node are down, we don't free the
1298 		 * kmem_list3 of any cache. This to avoid a race between
1299 		 * cpu_down, and a kmalloc allocation from another cpu for
1300 		 * memory from the node of the cpu going down.  The list3
1301 		 * structure is usually allocated from kmem_cache_create() and
1302 		 * gets destroyed at kmem_cache_destroy().
1303 		 */
1304 		/* fall thru */
1305 #endif
1306 	case CPU_UP_CANCELED:
1307 	case CPU_UP_CANCELED_FROZEN:
1308 		list_for_each_entry(cachep, &cache_chain, next) {
1309 			struct array_cache *nc;
1310 			struct array_cache *shared;
1311 			struct array_cache **alien;
1312 			cpumask_t mask;
1313 
1314 			mask = node_to_cpumask(node);
1315 			/* cpu is dead; no one can alloc from it. */
1316 			nc = cachep->array[cpu];
1317 			cachep->array[cpu] = NULL;
1318 			l3 = cachep->nodelists[node];
1319 
1320 			if (!l3)
1321 				goto free_array_cache;
1322 
1323 			spin_lock_irq(&l3->list_lock);
1324 
1325 			/* Free limit for this kmem_list3 */
1326 			l3->free_limit -= cachep->batchcount;
1327 			if (nc)
1328 				free_block(cachep, nc->entry, nc->avail, node);
1329 
1330 			if (!cpus_empty(mask)) {
1331 				spin_unlock_irq(&l3->list_lock);
1332 				goto free_array_cache;
1333 			}
1334 
1335 			shared = l3->shared;
1336 			if (shared) {
1337 				free_block(cachep, shared->entry,
1338 					   shared->avail, node);
1339 				l3->shared = NULL;
1340 			}
1341 
1342 			alien = l3->alien;
1343 			l3->alien = NULL;
1344 
1345 			spin_unlock_irq(&l3->list_lock);
1346 
1347 			kfree(shared);
1348 			if (alien) {
1349 				drain_alien_cache(cachep, alien);
1350 				free_alien_cache(alien);
1351 			}
1352 free_array_cache:
1353 			kfree(nc);
1354 		}
1355 		/*
1356 		 * In the previous loop, all the objects were freed to
 1357 		 * the respective cache's slabs; now we can go ahead and
1358 		 * shrink each nodelist to its limit.
1359 		 */
1360 		list_for_each_entry(cachep, &cache_chain, next) {
1361 			l3 = cachep->nodelists[node];
1362 			if (!l3)
1363 				continue;
1364 			drain_freelist(cachep, l3, l3->free_objects);
1365 		}
1366 		break;
1367 	case CPU_LOCK_RELEASE:
1368 		mutex_unlock(&cache_chain_mutex);
1369 		break;
1370 	}
1371 	return NOTIFY_OK;
1372 bad:
1373 	return NOTIFY_BAD;
1374 }
1375 
1376 static struct notifier_block __cpuinitdata cpucache_notifier = {
1377 	&cpuup_callback, NULL, 0
1378 };
1379 
1380 /*
1381  * swap the static kmem_list3 with kmalloced memory
1382  */
1383 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1384 			int nodeid)
1385 {
1386 	struct kmem_list3 *ptr;
1387 
1388 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1389 	BUG_ON(!ptr);
1390 
1391 	local_irq_disable();
1392 	memcpy(ptr, list, sizeof(struct kmem_list3));
1393 	/*
1394 	 * Do not assume that spinlocks can be initialized via memcpy:
1395 	 */
1396 	spin_lock_init(&ptr->list_lock);
1397 
1398 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1399 	cachep->nodelists[nodeid] = ptr;
1400 	local_irq_enable();
1401 }
1402 
1403 /*
 1404  * Initialisation.  Called after the page allocator has been initialised and
1405  * before smp_init().
1406  */
1407 void __init kmem_cache_init(void)
1408 {
1409 	size_t left_over;
1410 	struct cache_sizes *sizes;
1411 	struct cache_names *names;
1412 	int i;
1413 	int order;
1414 	int node;
1415 
1416 	if (num_possible_nodes() == 1)
1417 		use_alien_caches = 0;
1418 
1419 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1420 		kmem_list3_init(&initkmem_list3[i]);
1421 		if (i < MAX_NUMNODES)
1422 			cache_cache.nodelists[i] = NULL;
1423 	}
1424 
1425 	/*
1426 	 * Fragmentation resistance on low memory - only use bigger
1427 	 * page orders on machines with more than 32MB of memory.
1428 	 */
1429 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1430 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1431 
1432 	/* Bootstrap is tricky, because several objects are allocated
1433 	 * from caches that do not exist yet:
1434 	 * 1) initialize the cache_cache cache: it contains the struct
1435 	 *    kmem_cache structures of all caches, except cache_cache itself:
1436 	 *    cache_cache is statically allocated.
1437 	 *    Initially an __init data area is used for the head array and the
1438 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1439 	 *    array at the end of the bootstrap.
1440 	 * 2) Create the first kmalloc cache.
1441 	 *    The struct kmem_cache for the new cache is allocated normally.
1442 	 *    An __init data area is used for the head array.
1443 	 * 3) Create the remaining kmalloc caches, with minimally sized
1444 	 *    head arrays.
1445 	 * 4) Replace the __init data head arrays for cache_cache and the first
1446 	 *    kmalloc cache with kmalloc allocated arrays.
1447 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
 1448  *    the other caches with kmalloc allocated memory.
1449 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1450 	 */
1451 
1452 	node = numa_node_id();
1453 
1454 	/* 1) create the cache_cache */
1455 	INIT_LIST_HEAD(&cache_chain);
1456 	list_add(&cache_cache.next, &cache_chain);
1457 	cache_cache.colour_off = cache_line_size();
1458 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1459 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
1460 
1461 	/*
1462 	 * struct kmem_cache size depends on nr_node_ids, which
1463 	 * can be less than MAX_NUMNODES.
1464 	 */
1465 	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1466 				 nr_node_ids * sizeof(struct kmem_list3 *);
1467 #if DEBUG
1468 	cache_cache.obj_size = cache_cache.buffer_size;
1469 #endif
1470 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1471 					cache_line_size());
1472 	cache_cache.reciprocal_buffer_size =
1473 		reciprocal_value(cache_cache.buffer_size);
1474 
1475 	for (order = 0; order < MAX_ORDER; order++) {
1476 		cache_estimate(order, cache_cache.buffer_size,
1477 			cache_line_size(), 0, &left_over, &cache_cache.num);
1478 		if (cache_cache.num)
1479 			break;
1480 	}
1481 	BUG_ON(!cache_cache.num);
1482 	cache_cache.gfporder = order;
1483 	cache_cache.colour = left_over / cache_cache.colour_off;
1484 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1485 				      sizeof(struct slab), cache_line_size());
1486 
1487 	/* 2+3) create the kmalloc caches */
1488 	sizes = malloc_sizes;
1489 	names = cache_names;
1490 
1491 	/*
1492 	 * Initialize the caches that provide memory for the array cache and the
1493 	 * kmem_list3 structures first.  Without this, further allocations will
 1494 	 * BUG().
1495 	 */
1496 
1497 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1498 					sizes[INDEX_AC].cs_size,
1499 					ARCH_KMALLOC_MINALIGN,
1500 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1501 					NULL, NULL);
1502 
1503 	if (INDEX_AC != INDEX_L3) {
1504 		sizes[INDEX_L3].cs_cachep =
1505 			kmem_cache_create(names[INDEX_L3].name,
1506 				sizes[INDEX_L3].cs_size,
1507 				ARCH_KMALLOC_MINALIGN,
1508 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1509 				NULL, NULL);
1510 	}
1511 
1512 	slab_early_init = 0;
1513 
1514 	while (sizes->cs_size != ULONG_MAX) {
1515 		/*
1516 		 * For performance, all the general caches are L1 aligned.
1517 		 * This should be particularly beneficial on SMP boxes, as it
1518 		 * eliminates "false sharing".
 1519 		 * Note: for systems short on memory, removing the alignment will
1520 		 * allow tighter packing of the smaller caches.
1521 		 */
1522 		if (!sizes->cs_cachep) {
1523 			sizes->cs_cachep = kmem_cache_create(names->name,
1524 					sizes->cs_size,
1525 					ARCH_KMALLOC_MINALIGN,
1526 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1527 					NULL, NULL);
1528 		}
1529 #ifdef CONFIG_ZONE_DMA
1530 		sizes->cs_dmacachep = kmem_cache_create(
1531 					names->name_dma,
1532 					sizes->cs_size,
1533 					ARCH_KMALLOC_MINALIGN,
1534 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1535 						SLAB_PANIC,
1536 					NULL, NULL);
1537 #endif
1538 		sizes++;
1539 		names++;
1540 	}
1541 	/* 4) Replace the bootstrap head arrays */
1542 	{
1543 		struct array_cache *ptr;
1544 
1545 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1546 
1547 		local_irq_disable();
1548 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1549 		memcpy(ptr, cpu_cache_get(&cache_cache),
1550 		       sizeof(struct arraycache_init));
1551 		/*
1552 		 * Do not assume that spinlocks can be initialized via memcpy:
1553 		 */
1554 		spin_lock_init(&ptr->lock);
1555 
1556 		cache_cache.array[smp_processor_id()] = ptr;
1557 		local_irq_enable();
1558 
1559 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1560 
1561 		local_irq_disable();
1562 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1563 		       != &initarray_generic.cache);
1564 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1565 		       sizeof(struct arraycache_init));
1566 		/*
1567 		 * Do not assume that spinlocks can be initialized via memcpy:
1568 		 */
1569 		spin_lock_init(&ptr->lock);
1570 
1571 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1572 		    ptr;
1573 		local_irq_enable();
1574 	}
1575 	/* 5) Replace the bootstrap kmem_list3's */
1576 	{
1577 		int nid;
1578 
1579 		/* Replace the static kmem_list3 structures for the boot cpu */
1580 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
1581 
1582 		for_each_online_node(nid) {
1583 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1584 				  &initkmem_list3[SIZE_AC + nid], nid);
1585 
1586 			if (INDEX_AC != INDEX_L3) {
1587 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1588 					  &initkmem_list3[SIZE_L3 + nid], nid);
1589 			}
1590 		}
1591 	}
1592 
1593 	/* 6) resize the head arrays to their final sizes */
1594 	{
1595 		struct kmem_cache *cachep;
1596 		mutex_lock(&cache_chain_mutex);
1597 		list_for_each_entry(cachep, &cache_chain, next)
1598 			if (enable_cpucache(cachep))
1599 				BUG();
1600 		mutex_unlock(&cache_chain_mutex);
1601 	}
1602 
1603 	/* Annotate slab for lockdep -- annotate the malloc caches */
1604 	init_lock_keys();
1605 
1606 
1607 	/* Done! */
1608 	g_cpucache_up = FULL;
1609 
1610 	/*
1611 	 * Register a cpu startup notifier callback that initializes
1612 	 * cpu_cache_get for all new cpus
1613 	 */
1614 	register_cpu_notifier(&cpucache_notifier);
1615 
1616 	/*
 1617 	 * The reap timers are started later, with a module init call; that part
1618 	 * of the kernel is not yet operational.
1619 	 */
1620 }
1621 
1622 static int __init cpucache_init(void)
1623 {
1624 	int cpu;
1625 
1626 	/*
1627 	 * Register the timers that return unneeded pages to the page allocator
1628 	 */
1629 	for_each_online_cpu(cpu)
1630 		start_cpu_timer(cpu);
1631 	return 0;
1632 }
1633 __initcall(cpucache_init);
1634 
1635 /*
1636  * Interface to system's page allocator. No need to hold the cache-lock.
1637  *
1638  * If we requested dmaable memory, we will get it. Even if we
1639  * did not request dmaable memory, we might get it, but that
1640  * would be relatively rare and ignorable.
1641  */
1642 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1643 {
1644 	struct page *page;
1645 	int nr_pages;
1646 	int i;
1647 
1648 #ifndef CONFIG_MMU
1649 	/*
 1650 	 * Nommu uses slabs for process anonymous memory allocations, and thus
1651 	 * requires __GFP_COMP to properly refcount higher order allocations
1652 	 */
1653 	flags |= __GFP_COMP;
1654 #endif
1655 
1656 	flags |= cachep->gfpflags;
1657 
1658 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1659 	if (!page)
1660 		return NULL;
1661 
1662 	nr_pages = (1 << cachep->gfporder);
1663 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1664 		add_zone_page_state(page_zone(page),
1665 			NR_SLAB_RECLAIMABLE, nr_pages);
1666 	else
1667 		add_zone_page_state(page_zone(page),
1668 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1669 	for (i = 0; i < nr_pages; i++)
1670 		__SetPageSlab(page + i);
1671 	return page_address(page);
1672 }
1673 
1674 /*
1675  * Interface to system's page release.
1676  */
1677 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1678 {
1679 	unsigned long i = (1 << cachep->gfporder);
1680 	struct page *page = virt_to_page(addr);
1681 	const unsigned long nr_freed = i;
1682 
1683 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1684 		sub_zone_page_state(page_zone(page),
1685 				NR_SLAB_RECLAIMABLE, nr_freed);
1686 	else
1687 		sub_zone_page_state(page_zone(page),
1688 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1689 	while (i--) {
1690 		BUG_ON(!PageSlab(page));
1691 		__ClearPageSlab(page);
1692 		page++;
1693 	}
1694 	if (current->reclaim_state)
1695 		current->reclaim_state->reclaimed_slab += nr_freed;
1696 	free_pages((unsigned long)addr, cachep->gfporder);
1697 }
1698 
1699 static void kmem_rcu_free(struct rcu_head *head)
1700 {
1701 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1702 	struct kmem_cache *cachep = slab_rcu->cachep;
1703 
1704 	kmem_freepages(cachep, slab_rcu->addr);
1705 	if (OFF_SLAB(cachep))
1706 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1707 }
1708 
1709 #if DEBUG
1710 
1711 #ifdef CONFIG_DEBUG_PAGEALLOC
1712 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1713 			    unsigned long caller)
1714 {
1715 	int size = obj_size(cachep);
1716 
1717 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1718 
1719 	if (size < 5 * sizeof(unsigned long))
1720 		return;
1721 
1722 	*addr++ = 0x12345678;
1723 	*addr++ = caller;
1724 	*addr++ = smp_processor_id();
1725 	size -= 3 * sizeof(unsigned long);
1726 	{
1727 		unsigned long *sptr = &caller;
1728 		unsigned long svalue;
1729 
1730 		while (!kstack_end(sptr)) {
1731 			svalue = *sptr++;
1732 			if (kernel_text_address(svalue)) {
1733 				*addr++ = svalue;
1734 				size -= sizeof(unsigned long);
1735 				if (size <= sizeof(unsigned long))
1736 					break;
1737 			}
1738 		}
1739 
1740 	}
1741 	*addr++ = 0x87654321;
1742 }
1743 #endif
1744 
1745 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1746 {
1747 	int size = obj_size(cachep);
1748 	addr = &((char *)addr)[obj_offset(cachep)];
1749 
1750 	memset(addr, val, size);
1751 	*(unsigned char *)(addr + size - 1) = POISON_END;
1752 }
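
/*
 * Editor's sketch: an object poisoned with POISON_FREE (0x6b) is therefore
 * 0x6b bytes ending in a single POISON_END (0xa5) byte; e.g. an 8-byte
 * object reads
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * check_poison_obj() below verifies exactly this pattern.
 */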
1753 
1754 static void dump_line(char *data, int offset, int limit)
1755 {
1756 	int i;
1757 	unsigned char error = 0;
1758 	int bad_count = 0;
1759 
1760 	printk(KERN_ERR "%03x:", offset);
1761 	for (i = 0; i < limit; i++) {
1762 		if (data[offset + i] != POISON_FREE) {
1763 			error = data[offset + i];
1764 			bad_count++;
1765 		}
1766 		printk(" %02x", (unsigned char)data[offset + i]);
1767 	}
1768 	printk("\n");
1769 
1770 	if (bad_count == 1) {
1771 		error ^= POISON_FREE;
1772 		if (!(error & (error - 1))) {
1773 			printk(KERN_ERR "Single bit error detected. Probably "
1774 					"bad RAM.\n");
1775 #ifdef CONFIG_X86
1776 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1777 					"test tool.\n");
1778 #else
1779 			printk(KERN_ERR "Run a memory test tool.\n");
1780 #endif
1781 		}
1782 	}
1783 }
1784 #endif
1785 
1786 #if DEBUG
1787 
1788 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1789 {
1790 	int i, size;
1791 	char *realobj;
1792 
1793 	if (cachep->flags & SLAB_RED_ZONE) {
1794 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1795 			*dbg_redzone1(cachep, objp),
1796 			*dbg_redzone2(cachep, objp));
1797 	}
1798 
1799 	if (cachep->flags & SLAB_STORE_USER) {
1800 		printk(KERN_ERR "Last user: [<%p>]",
1801 			*dbg_userword(cachep, objp));
1802 		print_symbol("(%s)",
1803 				(unsigned long)*dbg_userword(cachep, objp));
1804 		printk("\n");
1805 	}
1806 	realobj = (char *)objp + obj_offset(cachep);
1807 	size = obj_size(cachep);
1808 	for (i = 0; i < size && lines; i += 16, lines--) {
1809 		int limit;
1810 		limit = 16;
1811 		if (i + limit > size)
1812 			limit = size - i;
1813 		dump_line(realobj, i, limit);
1814 	}
1815 }
1816 
1817 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1818 {
1819 	char *realobj;
1820 	int size, i;
1821 	int lines = 0;
1822 
1823 	realobj = (char *)objp + obj_offset(cachep);
1824 	size = obj_size(cachep);
1825 
1826 	for (i = 0; i < size; i++) {
1827 		char exp = POISON_FREE;
1828 		if (i == size - 1)
1829 			exp = POISON_END;
1830 		if (realobj[i] != exp) {
1831 			int limit;
1832 			/* Mismatch! */
1833 			/* Print header */
1834 			if (lines == 0) {
1835 				printk(KERN_ERR
1836 					"Slab corruption: %s start=%p, len=%d\n",
1837 					cachep->name, realobj, size);
1838 				print_objinfo(cachep, objp, 0);
1839 			}
1840 			/* Hexdump the affected line */
1841 			i = (i / 16) * 16;
1842 			limit = 16;
1843 			if (i + limit > size)
1844 				limit = size - i;
1845 			dump_line(realobj, i, limit);
1846 			i += 16;
1847 			lines++;
1848 			/* Limit to 5 lines */
1849 			if (lines > 5)
1850 				break;
1851 		}
1852 	}
1853 	if (lines != 0) {
1854 		/* Print some data about the neighboring objects, if they
1855 		 * exist:
1856 		 */
1857 		struct slab *slabp = virt_to_slab(objp);
1858 		unsigned int objnr;
1859 
1860 		objnr = obj_to_index(cachep, slabp, objp);
1861 		if (objnr) {
1862 			objp = index_to_obj(cachep, slabp, objnr - 1);
1863 			realobj = (char *)objp + obj_offset(cachep);
1864 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1865 			       realobj, size);
1866 			print_objinfo(cachep, objp, 2);
1867 		}
1868 		if (objnr + 1 < cachep->num) {
1869 			objp = index_to_obj(cachep, slabp, objnr + 1);
1870 			realobj = (char *)objp + obj_offset(cachep);
1871 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1872 			       realobj, size);
1873 			print_objinfo(cachep, objp, 2);
1874 		}
1875 	}
1876 }
1877 #endif
1878 
1879 #if DEBUG
1880 /**
1881  * slab_destroy_objs - destroy a slab and its objects
1882  * @cachep: cache pointer being destroyed
1883  * @slabp: slab pointer being destroyed
1884  *
1885  * Call the registered destructor for each object in a slab that is being
1886  * destroyed.
1887  */
1888 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1889 {
1890 	int i;
1891 	for (i = 0; i < cachep->num; i++) {
1892 		void *objp = index_to_obj(cachep, slabp, i);
1893 
1894 		if (cachep->flags & SLAB_POISON) {
1895 #ifdef CONFIG_DEBUG_PAGEALLOC
1896 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1897 					OFF_SLAB(cachep))
1898 				kernel_map_pages(virt_to_page(objp),
1899 					cachep->buffer_size / PAGE_SIZE, 1);
1900 			else
1901 				check_poison_obj(cachep, objp);
1902 #else
1903 			check_poison_obj(cachep, objp);
1904 #endif
1905 		}
1906 		if (cachep->flags & SLAB_RED_ZONE) {
1907 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1908 				slab_error(cachep, "start of a freed object "
1909 					   "was overwritten");
1910 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1911 				slab_error(cachep, "end of a freed object "
1912 					   "was overwritten");
1913 		}
1914 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
1915 			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1916 	}
1917 }
1918 #else
1919 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1920 {
1921 	if (cachep->dtor) {
1922 		int i;
1923 		for (i = 0; i < cachep->num; i++) {
1924 			void *objp = index_to_obj(cachep, slabp, i);
1925 			(cachep->dtor) (objp, cachep, 0);
1926 		}
1927 	}
1928 }
1929 #endif
1930 
1931 /**
1932  * slab_destroy - destroy and release all objects in a slab
1933  * @cachep: cache pointer being destroyed
1934  * @slabp: slab pointer being destroyed
1935  *
1936  * Destroy all the objs in a slab, and release the mem back to the system.
1937  * Before calling the slab must have been unlinked from the cache.  The
1938  * cache-lock is not held/needed.
1939  */
1940 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1941 {
1942 	void *addr = slabp->s_mem - slabp->colouroff;
1943 
1944 	slab_destroy_objs(cachep, slabp);
1945 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1946 		struct slab_rcu *slab_rcu;
1947 
1948 		slab_rcu = (struct slab_rcu *)slabp;
1949 		slab_rcu->cachep = cachep;
1950 		slab_rcu->addr = addr;
1951 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1952 	} else {
1953 		kmem_freepages(cachep, addr);
1954 		if (OFF_SLAB(cachep))
1955 			kmem_cache_free(cachep->slabp_cache, slabp);
1956 	}
1957 }
1958 
1959 /*
1960  * For setting up all the kmem_list3s for a cache whose buffer_size is the
1961  * same as the size of kmem_list3.
1962  */
1963 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1964 {
1965 	int node;
1966 
1967 	for_each_online_node(node) {
1968 		cachep->nodelists[node] = &initkmem_list3[index + node];
1969 		cachep->nodelists[node]->next_reap = jiffies +
1970 		    REAPTIMEOUT_LIST3 +
1971 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1972 	}
1973 }
1974 
1975 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1976 {
1977 	int i;
1978 	struct kmem_list3 *l3;
1979 
1980 	for_each_online_cpu(i)
1981 	    kfree(cachep->array[i]);
1982 
1983 	/* NUMA: free the list3 structures */
1984 	for_each_online_node(i) {
1985 		l3 = cachep->nodelists[i];
1986 		if (l3) {
1987 			kfree(l3->shared);
1988 			free_alien_cache(l3->alien);
1989 			kfree(l3);
1990 		}
1991 	}
1992 	kmem_cache_free(&cache_cache, cachep);
1993 }
1994 
1995 
1996 /**
1997  * calculate_slab_order - calculate size (page order) of slabs
1998  * @cachep: pointer to the cache that is being created
1999  * @size: size of objects to be created in this cache.
2000  * @align: required alignment for the objects.
2001  * @flags: slab allocation flags
2002  *
2003  * Also calculates the number of objects per slab.
2004  *
2005  * This could be made much more intelligent.  For now, try to avoid using
2006  * high order pages for slabs.  When the gfp() functions are more friendly
2007  * towards high-order requests, this should be changed.
2008  */
2009 static size_t calculate_slab_order(struct kmem_cache *cachep,
2010 			size_t size, size_t align, unsigned long flags)
2011 {
2012 	unsigned long offslab_limit;
2013 	size_t left_over = 0;
2014 	int gfporder;
2015 
2016 	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
2017 		unsigned int num;
2018 		size_t remainder;
2019 
2020 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2021 		if (!num)
2022 			continue;
2023 
2024 		if (flags & CFLGS_OFF_SLAB) {
2025 			/*
2026 			 * Max number of objs-per-slab for caches which
2027 			 * use off-slab slabs. Needed to avoid a possible
2028 			 * looping condition in cache_grow().
2029 			 */
2030 			offslab_limit = size - sizeof(struct slab);
2031 			offslab_limit /= sizeof(kmem_bufctl_t);
2032 
2033 			if (num > offslab_limit)
2034 				break;
2035 		}
2036 
2037 		/* Found something acceptable - save it away */
2038 		cachep->num = num;
2039 		cachep->gfporder = gfporder;
2040 		left_over = remainder;
2041 
2042 		/*
2043 		 * A VFS-reclaimable slab tends to have most allocations
2044 		 * as GFP_NOFS and we really don't want to have to be allocating
2045 		 * higher-order pages when we are unable to shrink dcache.
2046 		 */
2047 		if (flags & SLAB_RECLAIM_ACCOUNT)
2048 			break;
2049 
2050 		/*
2051 		 * Large number of objects is good, but very large slabs are
2052 		 * currently bad for the gfp()s.
2053 		 */
2054 		if (gfporder >= slab_break_gfp_order)
2055 			break;
2056 
2057 		/*
2058 		 * Acceptable internal fragmentation?
2059 		 */
2060 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2061 			break;
2062 	}
2063 	return left_over;
2064 }
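/*
 * Illustrative example (a sketch that ignores the per-object management
 * overhead cache_estimate() also charges): for 1500-byte objects on 4KB
 * pages, order 0 fits 2 objects with 1096 bytes left over; 1096 * 8 > 4096,
 * so the fragmentation test rejects it and order 1 is tried.  There 5
 * objects fit with 692 bytes left over, and the search ends (both the
 * slab_break_gfp_order cap and the 692 * 8 <= 8192 test stop the loop),
 * leaving gfporder = 1, num = 5.
 */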
2065 
2066 static int setup_cpu_cache(struct kmem_cache *cachep)
2067 {
2068 	if (g_cpucache_up == FULL)
2069 		return enable_cpucache(cachep);
2070 
2071 	if (g_cpucache_up == NONE) {
2072 		/*
2073 		 * Note: the first kmem_cache_create must create the cache
2074 		 * that's used by kmalloc(24), otherwise the creation of
2075 		 * further caches will BUG().
2076 		 */
2077 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2078 
2079 		/*
2080 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2081 		 * the first cache, then we need to set up all its list3s,
2082 		 * otherwise the creation of further caches will BUG().
2083 		 */
2084 		set_up_list3s(cachep, SIZE_AC);
2085 		if (INDEX_AC == INDEX_L3)
2086 			g_cpucache_up = PARTIAL_L3;
2087 		else
2088 			g_cpucache_up = PARTIAL_AC;
2089 	} else {
2090 		cachep->array[smp_processor_id()] =
2091 			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2092 
2093 		if (g_cpucache_up == PARTIAL_AC) {
2094 			set_up_list3s(cachep, SIZE_L3);
2095 			g_cpucache_up = PARTIAL_L3;
2096 		} else {
2097 			int node;
2098 			for_each_online_node(node) {
2099 				cachep->nodelists[node] =
2100 				    kmalloc_node(sizeof(struct kmem_list3),
2101 						GFP_KERNEL, node);
2102 				BUG_ON(!cachep->nodelists[node]);
2103 				kmem_list3_init(cachep->nodelists[node]);
2104 			}
2105 		}
2106 	}
2107 	cachep->nodelists[numa_node_id()]->next_reap =
2108 			jiffies + REAPTIMEOUT_LIST3 +
2109 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2110 
2111 	cpu_cache_get(cachep)->avail = 0;
2112 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2113 	cpu_cache_get(cachep)->batchcount = 1;
2114 	cpu_cache_get(cachep)->touched = 0;
2115 	cachep->batchcount = 1;
2116 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2117 	return 0;
2118 }
2119 
2120 /**
2121  * kmem_cache_create - Create a cache.
2122  * @name: A string which is used in /proc/slabinfo to identify this cache.
2123  * @size: The size of objects to be created in this cache.
2124  * @align: The required alignment for the objects.
2125  * @flags: SLAB flags
2126  * @ctor: A constructor for the objects.
2127  * @dtor: A destructor for the objects.
2128  *
2129  * Returns a ptr to the cache on success, NULL on failure.
2130  * Cannot be called within an interrupt, but can be interrupted.
2131  * The @ctor is run when new pages are allocated by the cache
2132  * and the @dtor is run before the pages are handed back.
2133  *
2134  * @name must be valid until the cache is destroyed. This implies that
2135  * the module calling this has to destroy the cache before getting unloaded.
2136  *
2137  * The flags are
2138  *
2139  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2140  * to catch references to uninitialised memory.
2141  *
2142  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2143  * for buffer overruns.
2144  *
2145  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2146  * cacheline.  This can be beneficial if you're counting cycles as closely
2147  * as davem.
2148  */
2149 struct kmem_cache *
2150 kmem_cache_create (const char *name, size_t size, size_t align,
2151 	unsigned long flags,
2152 	void (*ctor)(void*, struct kmem_cache *, unsigned long),
2153 	void (*dtor)(void*, struct kmem_cache *, unsigned long))
2154 {
2155 	size_t left_over, slab_size, ralign;
2156 	struct kmem_cache *cachep = NULL, *pc;
2157 
2158 	/*
2159 	 * Sanity checks... these are all serious usage bugs.
2160 	 */
2161 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2162 	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
2163 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2164 				name);
2165 		BUG();
2166 	}
2167 
2168 	/*
2169 	 * We use cache_chain_mutex to ensure a consistent view of
2170 	 * cpu_online_map as well.  Please see cpuup_callback
2171 	 */
2172 	mutex_lock(&cache_chain_mutex);
2173 
2174 	list_for_each_entry(pc, &cache_chain, next) {
2175 		char tmp;
2176 		int res;
2177 
2178 		/*
2179 		 * This happens when the module gets unloaded and doesn't
2180 		 * destroy its slab cache and no one else reuses the vmalloc
2181 		 * area of the module.  Print a warning.
2182 		 */
2183 		res = probe_kernel_address(pc->name, tmp);
2184 		if (res) {
2185 			printk(KERN_ERR
2186 			       "SLAB: cache with size %d has lost its name\n",
2187 			       pc->buffer_size);
2188 			continue;
2189 		}
2190 
2191 		if (!strcmp(pc->name, name)) {
2192 			printk(KERN_ERR
2193 			       "kmem_cache_create: duplicate cache %s\n", name);
2194 			dump_stack();
2195 			goto oops;
2196 		}
2197 	}
2198 
2199 #if DEBUG
2200 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2201 #if FORCED_DEBUG
2202 	/*
2203 	 * Enable redzoning and last user accounting, except for caches with
2204 	 * large objects where the added debug fields would push the object size
2205 	 * above the next power of two: caches with object sizes just above a
2206 	 * power of two have a significant amount of internal fragmentation.
2207 	 */
2208 	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
2209 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2210 	if (!(flags & SLAB_DESTROY_BY_RCU))
2211 		flags |= SLAB_POISON;
2212 #endif
2213 	if (flags & SLAB_DESTROY_BY_RCU)
2214 		BUG_ON(flags & SLAB_POISON);
2215 #endif
2216 	if (flags & SLAB_DESTROY_BY_RCU)
2217 		BUG_ON(dtor);
2218 
2219 	/*
2220 	 * Always check flags: a caller might be expecting debug support which
2221 	 * isn't available.
2222 	 */
2223 	BUG_ON(flags & ~CREATE_MASK);
2224 
2225 	/*
2226 	 * Check that size is in terms of words.  This is needed to avoid
2227 	 * unaligned accesses for some archs when redzoning is used, and makes
2228 	 * sure any on-slab bufctl's are also correctly aligned.
2229 	 */
2230 	if (size & (BYTES_PER_WORD - 1)) {
2231 		size += (BYTES_PER_WORD - 1);
2232 		size &= ~(BYTES_PER_WORD - 1);
2233 	}
2234 
2235 	/* calculate the final buffer alignment: */
2236 
2237 	/* 1) arch recommendation: can be overridden for debug */
2238 	if (flags & SLAB_HWCACHE_ALIGN) {
2239 		/*
2240 		 * Default alignment: as specified by the arch code.  Except if
2241 		 * an object is really small, then squeeze multiple objects into
2242 		 * one cacheline.
2243 		 */
2244 		ralign = cache_line_size();
2245 		while (size <= ralign / 2)
2246 			ralign /= 2;
2247 	} else {
2248 		ralign = BYTES_PER_WORD;
2249 	}
2250 
2251 	/*
2252 	 * Redzoning and user store require word alignment. Note this will be
2253 	 * overridden by architecture or caller mandated alignment if either
2254 	 * is greater than BYTES_PER_WORD.
2255 	 */
2256 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
2257 		ralign = __alignof__(unsigned long long);
2258 
2259 	/* 2) arch mandated alignment */
2260 	if (ralign < ARCH_SLAB_MINALIGN) {
2261 		ralign = ARCH_SLAB_MINALIGN;
2262 	}
2263 	/* 3) caller mandated alignment */
2264 	if (ralign < align) {
2265 		ralign = align;
2266 	}
2267 	/* disable debug if necessary */
2268 	if (ralign > __alignof__(unsigned long long))
2269 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2270 	/*
2271 	 * 4) Store it.
2272 	 */
2273 	align = ralign;
2274 
2275 	/* Get cache's description obj. */
2276 	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2277 	if (!cachep)
2278 		goto oops;
2279 
2280 #if DEBUG
2281 	cachep->obj_size = size;
2282 
2283 	/*
2284 	 * Both debugging options require word-alignment which is calculated
2285 	 * into align above.
2286 	 */
2287 	if (flags & SLAB_RED_ZONE) {
2288 		/* add space for red zone words */
2289 		cachep->obj_offset += sizeof(unsigned long long);
2290 		size += 2 * sizeof(unsigned long long);
2291 	}
2292 	if (flags & SLAB_STORE_USER) {
2293 		/* user store requires one word storage behind the end of
2294 		 * the real object.
2295 		 */
2296 		size += BYTES_PER_WORD;
2297 	}
2298 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2299 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2300 	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2301 		cachep->obj_offset += PAGE_SIZE - size;
2302 		size = PAGE_SIZE;
2303 	}
2304 #endif
2305 #endif
2306 
2307 	/*
2308 	 * Determine if the slab management is 'on' or 'off' slab.
2309 	 * (bootstrapping cannot cope with off-slab caches so don't do
2310 	 * it too early on.)
2311 	 */
2312 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
2313 		/*
2314 		 * Size is large, assume best to place the slab management obj
2315 		 * off-slab (should allow better packing of objs).
2316 		 */
2317 		flags |= CFLGS_OFF_SLAB;
2318 
2319 	size = ALIGN(size, align);
2320 
2321 	left_over = calculate_slab_order(cachep, size, align, flags);
2322 
2323 	if (!cachep->num) {
2324 		printk(KERN_ERR
2325 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2326 		kmem_cache_free(&cache_cache, cachep);
2327 		cachep = NULL;
2328 		goto oops;
2329 	}
2330 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2331 			  + sizeof(struct slab), align);
2332 
2333 	/*
2334 	 * If the slab has been placed off-slab, and we have enough space then
2335 	 * move it on-slab. This is at the expense of any extra colouring.
2336 	 */
2337 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2338 		flags &= ~CFLGS_OFF_SLAB;
2339 		left_over -= slab_size;
2340 	}
2341 
2342 	if (flags & CFLGS_OFF_SLAB) {
2343 		/* really off slab. No need for manual alignment */
2344 		slab_size =
2345 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2346 	}
2347 
2348 	cachep->colour_off = cache_line_size();
2349 	/* Offset must be a multiple of the alignment. */
2350 	if (cachep->colour_off < align)
2351 		cachep->colour_off = align;
2352 	cachep->colour = left_over / cachep->colour_off;
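	/*
	 * Illustration (hypothetical numbers): with left_over = 692 and a
	 * 64-byte colour_off, colour = 10, so cache_grow() cycles slab
	 * colouring through offsets 0, 64, ..., 576 before wrapping.
	 */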
2353 	cachep->slab_size = slab_size;
2354 	cachep->flags = flags;
2355 	cachep->gfpflags = 0;
2356 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2357 		cachep->gfpflags |= GFP_DMA;
2358 	cachep->buffer_size = size;
2359 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2360 
2361 	if (flags & CFLGS_OFF_SLAB) {
2362 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2363 		/*
2364 		 * This is a possibility for one of the malloc_sizes caches.
2365 		 * But since we go off slab only for object size greater than
2366 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2367 		 * this should not happen at all.
2368 		 * But leave a BUG_ON for some lucky dude.
2369 		 */
2370 		BUG_ON(!cachep->slabp_cache);
2371 	}
2372 	cachep->ctor = ctor;
2373 	cachep->dtor = dtor;
2374 	cachep->name = name;
2375 
2376 	if (setup_cpu_cache(cachep)) {
2377 		__kmem_cache_destroy(cachep);
2378 		cachep = NULL;
2379 		goto oops;
2380 	}
2381 
2382 	/* cache setup completed, link it into the list */
2383 	list_add(&cachep->next, &cache_chain);
2384 oops:
2385 	if (!cachep && (flags & SLAB_PANIC))
2386 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2387 		      name);
2388 	mutex_unlock(&cache_chain_mutex);
2389 	return cachep;
2390 }
2391 EXPORT_SYMBOL(kmem_cache_create);
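/*
 * Example usage (an illustrative sketch; the "foo" names are hypothetical):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *c, unsigned long f)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */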
2392 
2393 #if DEBUG
2394 static void check_irq_off(void)
2395 {
2396 	BUG_ON(!irqs_disabled());
2397 }
2398 
2399 static void check_irq_on(void)
2400 {
2401 	BUG_ON(irqs_disabled());
2402 }
2403 
2404 static void check_spinlock_acquired(struct kmem_cache *cachep)
2405 {
2406 #ifdef CONFIG_SMP
2407 	check_irq_off();
2408 	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2409 #endif
2410 }
2411 
2412 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2413 {
2414 #ifdef CONFIG_SMP
2415 	check_irq_off();
2416 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2417 #endif
2418 }
2419 
2420 #else
2421 #define check_irq_off()	do { } while(0)
2422 #define check_irq_on()	do { } while(0)
2423 #define check_spinlock_acquired(x) do { } while(0)
2424 #define check_spinlock_acquired_node(x, y) do { } while(0)
2425 #endif
2426 
2427 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2428 			struct array_cache *ac,
2429 			int force, int node);
2430 
2431 static void do_drain(void *arg)
2432 {
2433 	struct kmem_cache *cachep = arg;
2434 	struct array_cache *ac;
2435 	int node = numa_node_id();
2436 
2437 	check_irq_off();
2438 	ac = cpu_cache_get(cachep);
2439 	spin_lock(&cachep->nodelists[node]->list_lock);
2440 	free_block(cachep, ac->entry, ac->avail, node);
2441 	spin_unlock(&cachep->nodelists[node]->list_lock);
2442 	ac->avail = 0;
2443 }
2444 
2445 static void drain_cpu_caches(struct kmem_cache *cachep)
2446 {
2447 	struct kmem_list3 *l3;
2448 	int node;
2449 
2450 	on_each_cpu(do_drain, cachep, 1, 1);
2451 	check_irq_on();
2452 	for_each_online_node(node) {
2453 		l3 = cachep->nodelists[node];
2454 		if (l3 && l3->alien)
2455 			drain_alien_cache(cachep, l3->alien);
2456 	}
2457 
2458 	for_each_online_node(node) {
2459 		l3 = cachep->nodelists[node];
2460 		if (l3)
2461 			drain_array(cachep, l3, l3->shared, 1, node);
2462 	}
2463 }
2464 
2465 /*
2466  * Remove slabs from the list of free slabs.
2467  * Specify the number of slabs to drain in tofree.
2468  *
2469  * Returns the actual number of slabs released.
2470  */
2471 static int drain_freelist(struct kmem_cache *cache,
2472 			struct kmem_list3 *l3, int tofree)
2473 {
2474 	struct list_head *p;
2475 	int nr_freed;
2476 	struct slab *slabp;
2477 
2478 	nr_freed = 0;
2479 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2480 
2481 		spin_lock_irq(&l3->list_lock);
2482 		p = l3->slabs_free.prev;
2483 		if (p == &l3->slabs_free) {
2484 			spin_unlock_irq(&l3->list_lock);
2485 			goto out;
2486 		}
2487 
2488 		slabp = list_entry(p, struct slab, list);
2489 #if DEBUG
2490 		BUG_ON(slabp->inuse);
2491 #endif
2492 		list_del(&slabp->list);
2493 		/*
2494 		 * Safe to drop the lock. The slab is no longer linked
2495 		 * to the cache.
2496 		 */
2497 		l3->free_objects -= cache->num;
2498 		spin_unlock_irq(&l3->list_lock);
2499 		slab_destroy(cache, slabp);
2500 		nr_freed++;
2501 	}
2502 out:
2503 	return nr_freed;
2504 }
2505 
2506 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2507 static int __cache_shrink(struct kmem_cache *cachep)
2508 {
2509 	int ret = 0, i = 0;
2510 	struct kmem_list3 *l3;
2511 
2512 	drain_cpu_caches(cachep);
2513 
2514 	check_irq_on();
2515 	for_each_online_node(i) {
2516 		l3 = cachep->nodelists[i];
2517 		if (!l3)
2518 			continue;
2519 
2520 		drain_freelist(cachep, l3, l3->free_objects);
2521 
2522 		ret += !list_empty(&l3->slabs_full) ||
2523 			!list_empty(&l3->slabs_partial);
2524 	}
2525 	return (ret ? 1 : 0);
2526 }
2527 
2528 /**
2529  * kmem_cache_shrink - Shrink a cache.
2530  * @cachep: The cache to shrink.
2531  *
2532  * Releases as many slabs as possible for a cache.
2533  * To help debugging, a zero exit status indicates all slabs were released.
2534  */
2535 int kmem_cache_shrink(struct kmem_cache *cachep)
2536 {
2537 	int ret;
2538 	BUG_ON(!cachep || in_interrupt());
2539 
2540 	mutex_lock(&cache_chain_mutex);
2541 	ret = __cache_shrink(cachep);
2542 	mutex_unlock(&cache_chain_mutex);
2543 	return ret;
2544 }
2545 EXPORT_SYMBOL(kmem_cache_shrink);
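/*
 * Example (sketch, continuing the hypothetical foo_cache): release cached
 * free slabs under memory pressure and check whether everything was given
 * back:
 *
 *	if (kmem_cache_shrink(foo_cache))
 *		printk(KERN_DEBUG "foo_cache still has live objects\n");
 */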
2546 
2547 /**
2548  * kmem_cache_destroy - delete a cache
2549  * @cachep: the cache to destroy
2550  *
2551  * Remove a &struct kmem_cache object from the slab cache.
2552  *
2553  * It is expected this function will be called by a module when it is
2554  * unloaded.  This will remove the cache completely, and avoid a duplicate
2555  * cache being allocated each time a module is loaded and unloaded, if the
2556  * module doesn't have persistent in-kernel storage across loads and unloads.
2557  *
2558  * The cache must be empty before calling this function.
2559  *
2560  * The caller must guarantee that no one will allocate memory from the cache
2561  * during kmem_cache_destroy().
2562  */
2563 void kmem_cache_destroy(struct kmem_cache *cachep)
2564 {
2565 	BUG_ON(!cachep || in_interrupt());
2566 
2567 	/* Find the cache in the chain of caches. */
2568 	mutex_lock(&cache_chain_mutex);
2569 	/*
2570 	 * the chain is never empty, cache_cache is never destroyed
2571 	 */
2572 	list_del(&cachep->next);
2573 	if (__cache_shrink(cachep)) {
2574 		slab_error(cachep, "Can't free all objects");
2575 		list_add(&cachep->next, &cache_chain);
2576 		mutex_unlock(&cache_chain_mutex);
2577 		return;
2578 	}
2579 
2580 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2581 		synchronize_rcu();
2582 
2583 	__kmem_cache_destroy(cachep);
2584 	mutex_unlock(&cache_chain_mutex);
2585 }
2586 EXPORT_SYMBOL(kmem_cache_destroy);
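/*
 * Typical module-exit usage (sketch, continuing the hypothetical foo_cache
 * example): every object must have been returned with kmem_cache_free()
 * before this point.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */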
2587 
2588 /*
2589  * Get the memory for a slab management obj.
2590  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2591  * always come from malloc_sizes caches.  The slab descriptor cannot
2592  * come from the same cache which is getting created because,
2593  * when we are searching for an appropriate cache for these
2594  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2595  * If we are creating a malloc_sizes cache here it would not be visible to
2596  * kmem_find_general_cachep till the initialization is complete.
2597  * Hence we cannot have slabp_cache same as the original cache.
2598  */
2599 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2600 				   int colour_off, gfp_t local_flags,
2601 				   int nodeid)
2602 {
2603 	struct slab *slabp;
2604 
2605 	if (OFF_SLAB(cachep)) {
2606 		/* Slab management obj is off-slab. */
2607 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2608 					      local_flags & ~GFP_THISNODE, nodeid);
2609 		if (!slabp)
2610 			return NULL;
2611 	} else {
2612 		slabp = objp + colour_off;
2613 		colour_off += cachep->slab_size;
2614 	}
2615 	slabp->inuse = 0;
2616 	slabp->colouroff = colour_off;
2617 	slabp->s_mem = objp + colour_off;
2618 	slabp->nodeid = nodeid;
2619 	return slabp;
2620 }
2621 
2622 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2623 {
2624 	return (kmem_bufctl_t *) (slabp + 1);
2625 }
2626 
2627 static void cache_init_objs(struct kmem_cache *cachep,
2628 			    struct slab *slabp, unsigned long ctor_flags)
2629 {
2630 	int i;
2631 
2632 	for (i = 0; i < cachep->num; i++) {
2633 		void *objp = index_to_obj(cachep, slabp, i);
2634 #if DEBUG
2635 		/* need to poison the objs? */
2636 		if (cachep->flags & SLAB_POISON)
2637 			poison_obj(cachep, objp, POISON_FREE);
2638 		if (cachep->flags & SLAB_STORE_USER)
2639 			*dbg_userword(cachep, objp) = NULL;
2640 
2641 		if (cachep->flags & SLAB_RED_ZONE) {
2642 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2643 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2644 		}
2645 		/*
2646 		 * Constructors are not allowed to allocate memory from the same
2647 		 * cache which they are a constructor for.  Otherwise, deadlock.
2648 		 * They must also be threaded.
2649 		 */
2650 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2651 			cachep->ctor(objp + obj_offset(cachep), cachep,
2652 				     ctor_flags);
2653 
2654 		if (cachep->flags & SLAB_RED_ZONE) {
2655 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2656 				slab_error(cachep, "constructor overwrote the"
2657 					   " end of an object");
2658 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2659 				slab_error(cachep, "constructor overwrote the"
2660 					   " start of an object");
2661 		}
2662 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2663 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2664 			kernel_map_pages(virt_to_page(objp),
2665 					 cachep->buffer_size / PAGE_SIZE, 0);
2666 #else
2667 		if (cachep->ctor)
2668 			cachep->ctor(objp, cachep, ctor_flags);
2669 #endif
2670 		slab_bufctl(slabp)[i] = i + 1;
2671 	}
2672 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2673 	slabp->free = 0;
2674 }
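/*
 * After cache_init_objs() the bufctl array forms a simple index-linked
 * free list.  For a hypothetical slab with num = 4:
 *
 *	slab_bufctl(slabp): [1][2][3][BUFCTL_END]	slabp->free = 0
 *
 * slab_get_obj() pops from the head (free = bufctl[free]) and
 * slab_put_obj() pushes the freed index back, so the list behaves LIFO.
 */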
2675 
2676 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2677 {
2678 	if (CONFIG_ZONE_DMA_FLAG) {
2679 		if (flags & GFP_DMA)
2680 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2681 		else
2682 			BUG_ON(cachep->gfpflags & GFP_DMA);
2683 	}
2684 }
2685 
2686 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2687 				int nodeid)
2688 {
2689 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2690 	kmem_bufctl_t next;
2691 
2692 	slabp->inuse++;
2693 	next = slab_bufctl(slabp)[slabp->free];
2694 #if DEBUG
2695 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2696 	WARN_ON(slabp->nodeid != nodeid);
2697 #endif
2698 	slabp->free = next;
2699 
2700 	return objp;
2701 }
2702 
2703 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2704 				void *objp, int nodeid)
2705 {
2706 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2707 
2708 #if DEBUG
2709 	/* Verify that the slab belongs to the intended node */
2710 	WARN_ON(slabp->nodeid != nodeid);
2711 
2712 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2713 		printk(KERN_ERR "slab: double free detected in cache "
2714 				"'%s', objp %p\n", cachep->name, objp);
2715 		BUG();
2716 	}
2717 #endif
2718 	slab_bufctl(slabp)[objnr] = slabp->free;
2719 	slabp->free = objnr;
2720 	slabp->inuse--;
2721 }
2722 
2723 /*
2724  * Map pages beginning at addr to the given cache and slab. This is required
2725  * for the slab allocator to be able to look up the cache and slab of a
2726  * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2727  */
2728 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2729 			   void *addr)
2730 {
2731 	int nr_pages;
2732 	struct page *page;
2733 
2734 	page = virt_to_page(addr);
2735 
2736 	nr_pages = 1;
2737 	if (likely(!PageCompound(page)))
2738 		nr_pages <<= cache->gfporder;
2739 
2740 	do {
2741 		page_set_cache(page, cache);
2742 		page_set_slab(page, slab);
2743 		page++;
2744 	} while (--nr_pages);
2745 }
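/*
 * The reverse mapping established above is what the free and debug paths
 * rely on (sketch):
 *
 *	struct page *page = virt_to_head_page(obj);
 *	struct kmem_cache *c = page_get_cache(page);
 *	struct slab *s = page_get_slab(page);
 */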
2746 
2747 /*
2748  * Grow (by 1) the number of slabs within a cache.  This is called by
2749  * kmem_cache_alloc() when there are no active objs left in a cache.
2750  */
2751 static int cache_grow(struct kmem_cache *cachep,
2752 		gfp_t flags, int nodeid, void *objp)
2753 {
2754 	struct slab *slabp;
2755 	size_t offset;
2756 	gfp_t local_flags;
2757 	unsigned long ctor_flags;
2758 	struct kmem_list3 *l3;
2759 
2760 	/*
2761 	 * Be lazy and only check for valid flags here, keeping it out of the
2762 	 * critical path in kmem_cache_alloc().
2763 	 */
2764 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
2765 
2766 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2767 	local_flags = (flags & GFP_LEVEL_MASK);
2768 	/* Take the l3 list lock to change the colour_next on this node */
2769 	check_irq_off();
2770 	l3 = cachep->nodelists[nodeid];
2771 	spin_lock(&l3->list_lock);
2772 
2773 	/* Get colour for the slab, and calculate the next value. */
2774 	offset = l3->colour_next;
2775 	l3->colour_next++;
2776 	if (l3->colour_next >= cachep->colour)
2777 		l3->colour_next = 0;
2778 	spin_unlock(&l3->list_lock);
2779 
2780 	offset *= cachep->colour_off;
2781 
2782 	if (local_flags & __GFP_WAIT)
2783 		local_irq_enable();
2784 
2785 	/*
2786 	 * The test for missing atomic flag is performed here, rather than
2787 	 * the more obvious place, simply to reduce the critical path length
2788 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2789 	 * will eventually be caught here (where it matters).
2790 	 */
2791 	kmem_flagcheck(cachep, flags);
2792 
2793 	/*
2794 	 * Get mem for the objs.  Attempt to allocate a physical page from
2795 	 * 'nodeid'.
2796 	 */
2797 	if (!objp)
2798 		objp = kmem_getpages(cachep, flags, nodeid);
2799 	if (!objp)
2800 		goto failed;
2801 
2802 	/* Get slab management. */
2803 	slabp = alloc_slabmgmt(cachep, objp, offset,
2804 			local_flags & ~GFP_THISNODE, nodeid);
2805 	if (!slabp)
2806 		goto opps1;
2807 
2808 	slabp->nodeid = nodeid;
2809 	slab_map_pages(cachep, slabp, objp);
2810 
2811 	cache_init_objs(cachep, slabp, ctor_flags);
2812 
2813 	if (local_flags & __GFP_WAIT)
2814 		local_irq_disable();
2815 	check_irq_off();
2816 	spin_lock(&l3->list_lock);
2817 
2818 	/* Make slab active. */
2819 	list_add_tail(&slabp->list, &(l3->slabs_free));
2820 	STATS_INC_GROWN(cachep);
2821 	l3->free_objects += cachep->num;
2822 	spin_unlock(&l3->list_lock);
2823 	return 1;
2824 opps1:
2825 	kmem_freepages(cachep, objp);
2826 failed:
2827 	if (local_flags & __GFP_WAIT)
2828 		local_irq_disable();
2829 	return 0;
2830 }
2831 
2832 #if DEBUG
2833 
2834 /*
2835  * Perform extra freeing checks:
2836  * - detect bad pointers.
2837  * - POISON/RED_ZONE checking
2838  * - destructor calls, for caches with POISON+dtor
2839  */
2840 static void kfree_debugcheck(const void *objp)
2841 {
2842 	if (!virt_addr_valid(objp)) {
2843 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2844 		       (unsigned long)objp);
2845 		BUG();
2846 	}
2847 }
2848 
2849 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2850 {
2851 	unsigned long long redzone1, redzone2;
2852 
2853 	redzone1 = *dbg_redzone1(cache, obj);
2854 	redzone2 = *dbg_redzone2(cache, obj);
2855 
2856 	/*
2857 	 * Redzone is ok.
2858 	 */
2859 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2860 		return;
2861 
2862 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2863 		slab_error(cache, "double free detected");
2864 	else
2865 		slab_error(cache, "memory outside object was overwritten");
2866 
2867 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2868 			obj, redzone1, redzone2);
2869 }
2870 
2871 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2872 				   void *caller)
2873 {
2874 	struct page *page;
2875 	unsigned int objnr;
2876 	struct slab *slabp;
2877 
2878 	objp -= obj_offset(cachep);
2879 	kfree_debugcheck(objp);
2880 	page = virt_to_head_page(objp);
2881 
2882 	slabp = page_get_slab(page);
2883 
2884 	if (cachep->flags & SLAB_RED_ZONE) {
2885 		verify_redzone_free(cachep, objp);
2886 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2887 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2888 	}
2889 	if (cachep->flags & SLAB_STORE_USER)
2890 		*dbg_userword(cachep, objp) = caller;
2891 
2892 	objnr = obj_to_index(cachep, slabp, objp);
2893 
2894 	BUG_ON(objnr >= cachep->num);
2895 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2896 
2897 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
2898 		/* We want to cache-poison the object;
2899 		 * call the destruction callback first.
2900 		 */
2901 		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
2902 	}
2903 #ifdef CONFIG_DEBUG_SLAB_LEAK
2904 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2905 #endif
2906 	if (cachep->flags & SLAB_POISON) {
2907 #ifdef CONFIG_DEBUG_PAGEALLOC
2908 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2909 			store_stackinfo(cachep, objp, (unsigned long)caller);
2910 			kernel_map_pages(virt_to_page(objp),
2911 					 cachep->buffer_size / PAGE_SIZE, 0);
2912 		} else {
2913 			poison_obj(cachep, objp, POISON_FREE);
2914 		}
2915 #else
2916 		poison_obj(cachep, objp, POISON_FREE);
2917 #endif
2918 	}
2919 	return objp;
2920 }
2921 
2922 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2923 {
2924 	kmem_bufctl_t i;
2925 	int entries = 0;
2926 
2927 	/* Check slab's freelist to see if this obj is there. */
2928 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2929 		entries++;
2930 		if (entries > cachep->num || i >= cachep->num)
2931 			goto bad;
2932 	}
2933 	if (entries != cachep->num - slabp->inuse) {
2934 bad:
2935 		printk(KERN_ERR "slab: Internal list corruption detected in "
2936 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2937 			cachep->name, cachep->num, slabp, slabp->inuse);
2938 		for (i = 0;
2939 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2940 		     i++) {
2941 			if (i % 16 == 0)
2942 				printk("\n%03x:", i);
2943 			printk(" %02x", ((unsigned char *)slabp)[i]);
2944 		}
2945 		printk("\n");
2946 		BUG();
2947 	}
2948 }
2949 #else
2950 #define kfree_debugcheck(x) do { } while(0)
2951 #define cache_free_debugcheck(x,objp,z) (objp)
2952 #define check_slabp(x,y) do { } while(0)
2953 #endif
2954 
2955 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2956 {
2957 	int batchcount;
2958 	struct kmem_list3 *l3;
2959 	struct array_cache *ac;
2960 	int node;
2961 
2962 	node = numa_node_id();
2963 
2964 	check_irq_off();
2965 	ac = cpu_cache_get(cachep);
2966 retry:
2967 	batchcount = ac->batchcount;
2968 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2969 		/*
2970 		 * If there was little recent activity on this cache, then
2971 		 * perform only a partial refill.  Otherwise we could generate
2972 		 * refill bouncing.
2973 		 */
2974 		batchcount = BATCHREFILL_LIMIT;
2975 	}
2976 	l3 = cachep->nodelists[node];
2977 
2978 	BUG_ON(ac->avail > 0 || !l3);
2979 	spin_lock(&l3->list_lock);
2980 
2981 	/* See if we can refill from the shared array */
2982 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2983 		goto alloc_done;
2984 
2985 	while (batchcount > 0) {
2986 		struct list_head *entry;
2987 		struct slab *slabp;
2988 		/* Get the slab the allocation will come from. */
2989 		entry = l3->slabs_partial.next;
2990 		if (entry == &l3->slabs_partial) {
2991 			l3->free_touched = 1;
2992 			entry = l3->slabs_free.next;
2993 			if (entry == &l3->slabs_free)
2994 				goto must_grow;
2995 		}
2996 
2997 		slabp = list_entry(entry, struct slab, list);
2998 		check_slabp(cachep, slabp);
2999 		check_spinlock_acquired(cachep);
3000 
3001 		/*
3002 		 * The slab was either on partial or free list so
3003 		 * there must be at least one object available for
3004 		 * allocation.
3005 		 */
3006 		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
3007 
3008 		while (slabp->inuse < cachep->num && batchcount--) {
3009 			STATS_INC_ALLOCED(cachep);
3010 			STATS_INC_ACTIVE(cachep);
3011 			STATS_SET_HIGH(cachep);
3012 
3013 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3014 							    node);
3015 		}
3016 		check_slabp(cachep, slabp);
3017 
3018 		/* move slabp to correct slabp list: */
3019 		list_del(&slabp->list);
3020 		if (slabp->free == BUFCTL_END)
3021 			list_add(&slabp->list, &l3->slabs_full);
3022 		else
3023 			list_add(&slabp->list, &l3->slabs_partial);
3024 	}
3025 
3026 must_grow:
3027 	l3->free_objects -= ac->avail;
3028 alloc_done:
3029 	spin_unlock(&l3->list_lock);
3030 
3031 	if (unlikely(!ac->avail)) {
3032 		int x;
3033 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3034 
3035 		/* cache_grow can reenable interrupts, then ac could change. */
3036 		ac = cpu_cache_get(cachep);
3037 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3038 			return NULL;
3039 
3040 		if (!ac->avail)		/* objects refilled by interrupt? */
3041 			goto retry;
3042 	}
3043 	ac->touched = 1;
3044 	return ac->entry[--ac->avail];
3045 }
3046 
3047 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3048 						gfp_t flags)
3049 {
3050 	might_sleep_if(flags & __GFP_WAIT);
3051 #if DEBUG
3052 	kmem_flagcheck(cachep, flags);
3053 #endif
3054 }
3055 
3056 #if DEBUG
3057 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3058 				gfp_t flags, void *objp, void *caller)
3059 {
3060 	if (!objp)
3061 		return objp;
3062 	if (cachep->flags & SLAB_POISON) {
3063 #ifdef CONFIG_DEBUG_PAGEALLOC
3064 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3065 			kernel_map_pages(virt_to_page(objp),
3066 					 cachep->buffer_size / PAGE_SIZE, 1);
3067 		else
3068 			check_poison_obj(cachep, objp);
3069 #else
3070 		check_poison_obj(cachep, objp);
3071 #endif
3072 		poison_obj(cachep, objp, POISON_INUSE);
3073 	}
3074 	if (cachep->flags & SLAB_STORE_USER)
3075 		*dbg_userword(cachep, objp) = caller;
3076 
3077 	if (cachep->flags & SLAB_RED_ZONE) {
3078 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3079 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3080 			slab_error(cachep, "double free, or memory outside"
3081 						" object was overwritten");
3082 			printk(KERN_ERR
3083 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3084 				objp, *dbg_redzone1(cachep, objp),
3085 				*dbg_redzone2(cachep, objp));
3086 		}
3087 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3088 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3089 	}
3090 #ifdef CONFIG_DEBUG_SLAB_LEAK
3091 	{
3092 		struct slab *slabp;
3093 		unsigned objnr;
3094 
3095 		slabp = page_get_slab(virt_to_head_page(objp));
3096 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3097 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3098 	}
3099 #endif
3100 	objp += obj_offset(cachep);
3101 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3102 		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
3103 #if ARCH_SLAB_MINALIGN
3104 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3105 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3106 		       objp, ARCH_SLAB_MINALIGN);
3107 	}
3108 #endif
3109 	return objp;
3110 }
3111 #else
3112 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3113 #endif
3114 
3115 #ifdef CONFIG_FAILSLAB
3116 
3117 static struct failslab_attr {
3118 
3119 	struct fault_attr attr;
3120 
3121 	u32 ignore_gfp_wait;
3122 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3123 	struct dentry *ignore_gfp_wait_file;
3124 #endif
3125 
3126 } failslab = {
3127 	.attr = FAULT_ATTR_INITIALIZER,
3128 	.ignore_gfp_wait = 1,
3129 };
3130 
3131 static int __init setup_failslab(char *str)
3132 {
3133 	return setup_fault_attr(&failslab.attr, str);
3134 }
3135 __setup("failslab=", setup_failslab);
3136 
3137 static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3138 {
3139 	if (cachep == &cache_cache)
3140 		return 0;
3141 	if (flags & __GFP_NOFAIL)
3142 		return 0;
3143 	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
3144 		return 0;
3145 
3146 	return should_fail(&failslab.attr, obj_size(cachep));
3147 }
3148 
3149 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3150 
3151 static int __init failslab_debugfs(void)
3152 {
3153 	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
3154 	struct dentry *dir;
3155 	int err;
3156 
3157 	err = init_fault_attr_dentries(&failslab.attr, "failslab");
3158 	if (err)
3159 		return err;
3160 	dir = failslab.attr.dentries.dir;
3161 
3162 	failslab.ignore_gfp_wait_file =
3163 		debugfs_create_bool("ignore-gfp-wait", mode, dir,
3164 				      &failslab.ignore_gfp_wait);
3165 
3166 	if (!failslab.ignore_gfp_wait_file) {
3167 		err = -ENOMEM;
3168 		debugfs_remove(failslab.ignore_gfp_wait_file);
3169 		cleanup_fault_attr_dentries(&failslab.attr);
3170 	}
3171 
3172 	return err;
3173 }
3174 
3175 late_initcall(failslab_debugfs);
3176 
3177 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3178 
3179 #else /* CONFIG_FAILSLAB */
3180 
3181 static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3182 {
3183 	return 0;
3184 }
3185 
3186 #endif /* CONFIG_FAILSLAB */
3187 
3188 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3189 {
3190 	void *objp;
3191 	struct array_cache *ac;
3192 
3193 	check_irq_off();
3194 
3195 	ac = cpu_cache_get(cachep);
3196 	if (likely(ac->avail)) {
3197 		STATS_INC_ALLOCHIT(cachep);
3198 		ac->touched = 1;
3199 		objp = ac->entry[--ac->avail];
3200 	} else {
3201 		STATS_INC_ALLOCMISS(cachep);
3202 		objp = cache_alloc_refill(cachep, flags);
3203 	}
3204 	return objp;
3205 }
3206 
3207 #ifdef CONFIG_NUMA
3208 /*
3209  * Try allocating on another node if PF_SPREAD_SLAB or PF_MEMPOLICY is set.
3210  *
3211  * If we are in_interrupt, then process context, including cpusets and
3212  * mempolicy, may not apply and should not be used for allocation policy.
3213  */
3214 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3215 {
3216 	int nid_alloc, nid_here;
3217 
3218 	if (in_interrupt() || (flags & __GFP_THISNODE))
3219 		return NULL;
3220 	nid_alloc = nid_here = numa_node_id();
3221 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3222 		nid_alloc = cpuset_mem_spread_node();
3223 	else if (current->mempolicy)
3224 		nid_alloc = slab_node(current->mempolicy);
3225 	if (nid_alloc != nid_here)
3226 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3227 	return NULL;
3228 }
3229 
3230 /*
3231  * Fallback function if there was no memory available and no objects on a
3232  * certain node and fallback is permitted. First we scan all the
3233  * available nodelists for available objects. If that fails then we
3234  * perform an allocation without specifying a node. This allows the page
3235  * allocator to do its reclaim / fallback magic. We then insert the
3236  * slab into the proper nodelist and then allocate from it.
3237  */
3238 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3239 {
3240 	struct zonelist *zonelist;
3241 	gfp_t local_flags;
3242 	struct zone **z;
3243 	void *obj = NULL;
3244 	int nid;
3245 
3246 	if (flags & __GFP_THISNODE)
3247 		return NULL;
3248 
3249 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
3250 			->node_zonelists[gfp_zone(flags)];
3251 	local_flags = (flags & GFP_LEVEL_MASK);
3252 
3253 retry:
3254 	/*
3255 	 * Look through allowed nodes for objects available
3256 	 * from existing per node queues.
3257 	 */
3258 	for (z = zonelist->zones; *z && !obj; z++) {
3259 		nid = zone_to_nid(*z);
3260 
3261 		if (cpuset_zone_allowed_hardwall(*z, flags) &&
3262 			cache->nodelists[nid] &&
3263 			cache->nodelists[nid]->free_objects)
3264 				obj = ____cache_alloc_node(cache,
3265 					flags | GFP_THISNODE, nid);
3266 	}
3267 
3268 	if (!obj) {
3269 		/*
3270 		 * This allocation will be performed within the constraints
3271 		 * of the current cpuset / memory policy requirements.
3272 		 * We may trigger various forms of reclaim on the allowed
3273 		 * set and go into memory reserves if necessary.
3274 		 */
3275 		if (local_flags & __GFP_WAIT)
3276 			local_irq_enable();
3277 		kmem_flagcheck(cache, flags);
3278 		obj = kmem_getpages(cache, flags, -1);
3279 		if (local_flags & __GFP_WAIT)
3280 			local_irq_disable();
3281 		if (obj) {
3282 			/*
3283 			 * Insert into the appropriate per node queues
3284 			 */
3285 			nid = page_to_nid(virt_to_page(obj));
3286 			if (cache_grow(cache, flags, nid, obj)) {
3287 				obj = ____cache_alloc_node(cache,
3288 					flags | GFP_THISNODE, nid);
3289 				if (!obj)
3290 					/*
3291 					 * Another processor may allocate the
3292 					 * objects in the slab since we are
3293 					 * not holding any locks.
3294 					 */
3295 					goto retry;
3296 			} else {
3297 				/* cache_grow already freed obj */
3298 				obj = NULL;
3299 			}
3300 		}
3301 	}
3302 	return obj;
3303 }
3304 
3305 /*
3306  * An interface to enable slab creation on a given node (nodeid)
3307  */
3308 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3309 				int nodeid)
3310 {
3311 	struct list_head *entry;
3312 	struct slab *slabp;
3313 	struct kmem_list3 *l3;
3314 	void *obj;
3315 	int x;
3316 
3317 	l3 = cachep->nodelists[nodeid];
3318 	BUG_ON(!l3);
3319 
3320 retry:
3321 	check_irq_off();
3322 	spin_lock(&l3->list_lock);
3323 	entry = l3->slabs_partial.next;
3324 	if (entry == &l3->slabs_partial) {
3325 		l3->free_touched = 1;
3326 		entry = l3->slabs_free.next;
3327 		if (entry == &l3->slabs_free)
3328 			goto must_grow;
3329 	}
3330 
3331 	slabp = list_entry(entry, struct slab, list);
3332 	check_spinlock_acquired_node(cachep, nodeid);
3333 	check_slabp(cachep, slabp);
3334 
3335 	STATS_INC_NODEALLOCS(cachep);
3336 	STATS_INC_ACTIVE(cachep);
3337 	STATS_SET_HIGH(cachep);
3338 
3339 	BUG_ON(slabp->inuse == cachep->num);
3340 
3341 	obj = slab_get_obj(cachep, slabp, nodeid);
3342 	check_slabp(cachep, slabp);
3343 	l3->free_objects--;
3344 	/* move slabp to correct slabp list: */
3345 	list_del(&slabp->list);
3346 
3347 	if (slabp->free == BUFCTL_END)
3348 		list_add(&slabp->list, &l3->slabs_full);
3349 	else
3350 		list_add(&slabp->list, &l3->slabs_partial);
3351 
3352 	spin_unlock(&l3->list_lock);
3353 	goto done;
3354 
3355 must_grow:
3356 	spin_unlock(&l3->list_lock);
3357 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3358 	if (x)
3359 		goto retry;
3360 
3361 	return fallback_alloc(cachep, flags);
3362 
3363 done:
3364 	return obj;
3365 }
3366 
3367 /**
3368  * kmem_cache_alloc_node - Allocate an object on the specified node
3369  * @cachep: The cache to allocate from.
3370  * @flags: See kmalloc().
3371  * @nodeid: node number of the target node.
3372  * @caller: return address of caller, used for debug information
3373  *
3374  * Identical to kmem_cache_alloc but it will allocate memory on the given
3375  * node, which can improve the performance for cpu bound structures.
3376  *
3377  * Fallback to other node is possible if __GFP_THISNODE is not set.
3378  */
3379 static __always_inline void *
3380 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3381 		   void *caller)
3382 {
3383 	unsigned long save_flags;
3384 	void *ptr;
3385 
3386 	if (should_failslab(cachep, flags))
3387 		return NULL;
3388 
3389 	cache_alloc_debugcheck_before(cachep, flags);
3390 	local_irq_save(save_flags);
3391 
3392 	if (unlikely(nodeid == -1))
3393 		nodeid = numa_node_id();
3394 
3395 	if (unlikely(!cachep->nodelists[nodeid])) {
3396 		/* Node not bootstrapped yet */
3397 		ptr = fallback_alloc(cachep, flags);
3398 		goto out;
3399 	}
3400 
3401 	if (nodeid == numa_node_id()) {
3402 		/*
3403 		 * Use the locally cached objects if possible.
3404 		 * However ____cache_alloc does not allow fallback
3405 		 * to other nodes. It may fail while we still have
3406 		 * objects on other nodes available.
3407 		 */
3408 		ptr = ____cache_alloc(cachep, flags);
3409 		if (ptr)
3410 			goto out;
3411 	}
3412 	/* ___cache_alloc_node can fall back to other nodes */
3413 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3414   out:
3415 	local_irq_restore(save_flags);
3416 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3417 
3418 	return ptr;
3419 }
3420 
3421 static __always_inline void *
3422 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3423 {
3424 	void *objp;
3425 
3426 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3427 		objp = alternate_node_alloc(cache, flags);
3428 		if (objp)
3429 			goto out;
3430 	}
3431 	objp = ____cache_alloc(cache, flags);
3432 
3433 	/*
3434 	 * We may just have run out of memory on the local node.
3435 	 * ____cache_alloc_node() knows how to locate memory on other nodes.
3436 	 */
3437  	if (!objp)
3438  		objp = ____cache_alloc_node(cache, flags, numa_node_id());
3439 
3440   out:
3441 	return objp;
3442 }
3443 #else
3444 
3445 static __always_inline void *
3446 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3447 {
3448 	return ____cache_alloc(cachep, flags);
3449 }
3450 
3451 #endif /* CONFIG_NUMA */
3452 
3453 static __always_inline void *
3454 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3455 {
3456 	unsigned long save_flags;
3457 	void *objp;
3458 
3459 	if (should_failslab(cachep, flags))
3460 		return NULL;
3461 
3462 	cache_alloc_debugcheck_before(cachep, flags);
3463 	local_irq_save(save_flags);
3464 	objp = __do_cache_alloc(cachep, flags);
3465 	local_irq_restore(save_flags);
3466 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3467 	prefetchw(objp);
3468 
3469 	return objp;
3470 }
3471 
3472 /*
3473  * Caller needs to acquire correct kmem_list's list_lock
3474  */
3475 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3476 		       int node)
3477 {
3478 	int i;
3479 	struct kmem_list3 *l3;
3480 
3481 	for (i = 0; i < nr_objects; i++) {
3482 		void *objp = objpp[i];
3483 		struct slab *slabp;
3484 
3485 		slabp = virt_to_slab(objp);
3486 		l3 = cachep->nodelists[node];
3487 		list_del(&slabp->list);
3488 		check_spinlock_acquired_node(cachep, node);
3489 		check_slabp(cachep, slabp);
3490 		slab_put_obj(cachep, slabp, objp, node);
3491 		STATS_DEC_ACTIVE(cachep);
3492 		l3->free_objects++;
3493 		check_slabp(cachep, slabp);
3494 
3495 		/* fixup slab chains */
3496 		if (slabp->inuse == 0) {
3497 			if (l3->free_objects > l3->free_limit) {
3498 				l3->free_objects -= cachep->num;
3499 				/* No need to drop any previously held
3500 				 * lock here, even if we have a off-slab slab
3501 				 * descriptor it is guaranteed to come from
3502 				 * a different cache, refer to comments before
3503 				 * alloc_slabmgmt.
3504 				 */
3505 				slab_destroy(cachep, slabp);
3506 			} else {
3507 				list_add(&slabp->list, &l3->slabs_free);
3508 			}
3509 		} else {
3510 			/* Unconditionally move a slab to the end of the
3511 			 * partial list on free - maximum time for the
3512 			 * other objects to be freed, too.
3513 			 */
3514 			list_add_tail(&slabp->list, &l3->slabs_partial);
3515 		}
3516 	}
3517 }
3518 
3519 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3520 {
3521 	int batchcount;
3522 	struct kmem_list3 *l3;
3523 	int node = numa_node_id();
3524 
3525 	batchcount = ac->batchcount;
3526 #if DEBUG
3527 	BUG_ON(!batchcount || batchcount > ac->avail);
3528 #endif
3529 	check_irq_off();
3530 	l3 = cachep->nodelists[node];
3531 	spin_lock(&l3->list_lock);
3532 	if (l3->shared) {
3533 		struct array_cache *shared_array = l3->shared;
3534 		int max = shared_array->limit - shared_array->avail;
3535 		if (max) {
3536 			if (batchcount > max)
3537 				batchcount = max;
3538 			memcpy(&(shared_array->entry[shared_array->avail]),
3539 			       ac->entry, sizeof(void *) * batchcount);
3540 			shared_array->avail += batchcount;
3541 			goto free_done;
3542 		}
3543 	}
3544 
3545 	free_block(cachep, ac->entry, batchcount, node);
3546 free_done:
3547 #if STATS
3548 	{
3549 		int i = 0;
3550 		struct list_head *p;
3551 
3552 		p = l3->slabs_free.next;
3553 		while (p != &(l3->slabs_free)) {
3554 			struct slab *slabp;
3555 
3556 			slabp = list_entry(p, struct slab, list);
3557 			BUG_ON(slabp->inuse);
3558 
3559 			i++;
3560 			p = p->next;
3561 		}
3562 		STATS_SET_FREEABLE(cachep, i);
3563 	}
3564 #endif
3565 	spin_unlock(&l3->list_lock);
3566 	ac->avail -= batchcount;
3567 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3568 }
3569 
3570 /*
3571  * Release an obj back to its cache. If the obj has a constructed state, it must
3572  * be in this state _before_ it is released.  Called with interrupts disabled.
3573  */
3574 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3575 {
3576 	struct array_cache *ac = cpu_cache_get(cachep);
3577 
3578 	check_irq_off();
3579 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3580 
3581 	if (use_alien_caches && cache_free_alien(cachep, objp))
3582 		return;
3583 
3584 	if (likely(ac->avail < ac->limit)) {
3585 		STATS_INC_FREEHIT(cachep);
3586 		ac->entry[ac->avail++] = objp;
3587 		return;
3588 	} else {
3589 		STATS_INC_FREEMISS(cachep);
3590 		cache_flusharray(cachep, ac);
3591 		ac->entry[ac->avail++] = objp;
3592 	}
3593 }
3594 
3595 /**
3596  * kmem_cache_alloc - Allocate an object
3597  * @cachep: The cache to allocate from.
3598  * @flags: See kmalloc().
3599  *
3600  * Allocate an object from this cache.  The flags are only relevant
3601  * if the cache has no available objects.
3602  */
3603 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3604 {
3605 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3606 }
3607 EXPORT_SYMBOL(kmem_cache_alloc);
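/*
 * Example (sketch, using the hypothetical foo_cache from above):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */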
3608 
3609 /**
3610  * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3611  * @cache: The cache to allocate from.
3612  * @flags: See kmalloc().
3613  *
3614  * Allocate an object from this cache and set the allocated memory to zero.
3615  * The flags are only relevant if the cache has no available objects.
3616  */
3617 void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3618 {
3619 	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3620 	if (ret)
3621 		memset(ret, 0, obj_size(cache));
3622 	return ret;
3623 }
3624 EXPORT_SYMBOL(kmem_cache_zalloc);
3625 
3626 /**
3627  * kmem_ptr_validate - check if an untrusted pointer might
3628  *	be a slab entry.
3629  * @cachep: the cache we're checking against
3630  * @ptr: pointer to validate
3631  *
3632  * This verifies that the untrusted pointer looks sane:
3633  * it is _not_ a guarantee that the pointer is actually
3634  * part of the slab cache in question, but it at least
3635  * validates that the pointer can be dereferenced and
3636  * looks half-way sane.
3637  *
3638  * Currently only used for dentry validation.
3639  */
3640 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3641 {
3642 	unsigned long addr = (unsigned long)ptr;
3643 	unsigned long min_addr = PAGE_OFFSET;
3644 	unsigned long align_mask = BYTES_PER_WORD - 1;
3645 	unsigned long size = cachep->buffer_size;
3646 	struct page *page;
3647 
3648 	if (unlikely(addr < min_addr))
3649 		goto out;
3650 	if (unlikely(addr > (unsigned long)high_memory - size))
3651 		goto out;
3652 	if (unlikely(addr & align_mask))
3653 		goto out;
3654 	if (unlikely(!kern_addr_valid(addr)))
3655 		goto out;
3656 	if (unlikely(!kern_addr_valid(addr + size - 1)))
3657 		goto out;
3658 	page = virt_to_page(ptr);
3659 	if (unlikely(!PageSlab(page)))
3660 		goto out;
3661 	if (unlikely(page_get_cache(page) != cachep))
3662 		goto out;
3663 	return 1;
3664 out:
3665 	return 0;
3666 }
3667 
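/*
 * Sketch of a caller (illustrative): code that inspects objects it
 * does not own can use this as a cheap sanity filter before
 * dereferencing, e.g.
 *
 *	if (!kmem_ptr_validate(dentry_cache, ptr))
 *		return NULL;
 *
 * Even a nonzero return is only a plausibility check, not proof that
 * the object is live.
 */
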
3668 #ifdef CONFIG_NUMA
3669 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3670 {
3671 	return __cache_alloc_node(cachep, flags, nodeid,
3672 			__builtin_return_address(0));
3673 }
3674 EXPORT_SYMBOL(kmem_cache_alloc_node);
3675 
3676 static __always_inline void *
3677 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3678 {
3679 	struct kmem_cache *cachep;
3680 
3681 	cachep = kmem_find_general_cachep(size, flags);
3682 	if (unlikely(cachep == NULL))
3683 		return NULL;
3684 	return kmem_cache_alloc_node(cachep, flags, node);
3685 }
3686 
3687 #ifdef CONFIG_DEBUG_SLAB
3688 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3689 {
3690 	return __do_kmalloc_node(size, flags, node,
3691 			__builtin_return_address(0));
3692 }
3693 EXPORT_SYMBOL(__kmalloc_node);
3694 
3695 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3696 		int node, void *caller)
3697 {
3698 	return __do_kmalloc_node(size, flags, node, caller);
3699 }
3700 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3701 #else
3702 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3703 {
3704 	return __do_kmalloc_node(size, flags, node, NULL);
3705 }
3706 EXPORT_SYMBOL(__kmalloc_node);
3707 #endif /* CONFIG_DEBUG_SLAB */
3708 #endif /* CONFIG_NUMA */
3709 
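/*
 * Illustrative NUMA usage (dev is a hypothetical struct device *):
 * allocate a buffer on the node closest to a device so that hot-path
 * accesses stay node-local:
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 *
 * If the preferred node is out of memory, the allocation may fall
 * back to other nodes according to that node's zonelist.
 */
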
3710 /**
3711  * __do_kmalloc - allocate memory
3712  * @size: how many bytes of memory are required.
3713  * @flags: the type of memory to allocate (see kmalloc).
3714  * @caller: function caller for debug tracking of the caller
3715  */
3716 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3717 					  void *caller)
3718 {
3719 	struct kmem_cache *cachep;
3720 
3721 	/* If you want to save a few bytes of .text space, replace
3722 	 * __ with kmem_.
3723 	 * Then kmalloc uses the uninlined functions instead of the
3724 	 * inlined ones.
3725 	 */
3726 	cachep = __find_general_cachep(size, flags);
3727 	if (unlikely(cachep == NULL))
3728 		return NULL;
3729 	return __cache_alloc(cachep, flags, caller);
3730 }
3731 
3732 
3733 #ifdef CONFIG_DEBUG_SLAB
3734 void *__kmalloc(size_t size, gfp_t flags)
3735 {
3736 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3737 }
3738 EXPORT_SYMBOL(__kmalloc);
3739 
3740 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3741 {
3742 	return __do_kmalloc(size, flags, caller);
3743 }
3744 EXPORT_SYMBOL(__kmalloc_track_caller);
3745 
3746 #else
3747 void *__kmalloc(size_t size, gfp_t flags)
3748 {
3749 	return __do_kmalloc(size, flags, NULL);
3750 }
3751 EXPORT_SYMBOL(__kmalloc);
3752 #endif
3753 
3754 /**
3755  * krealloc - reallocate memory. The contents will remain unchanged.
3756  * @p: object to reallocate memory for.
3757  * @new_size: how many bytes of memory are required.
3758  * @flags: the type of memory to allocate.
3759  *
3760  * The contents of the object pointed to are preserved up to the
3761  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
3762  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
3763  * %NULL pointer, the object pointed to is freed.
3764  */
3765 void *krealloc(const void *p, size_t new_size, gfp_t flags)
3766 {
3767 	struct kmem_cache *cache, *new_cache;
3768 	void *ret;
3769 
3770 	if (unlikely(!p))
3771 		return kmalloc_track_caller(new_size, flags);
3772 
3773 	if (unlikely(!new_size)) {
3774 		kfree(p);
3775 		return NULL;
3776 	}
3777 
3778 	cache = virt_to_cache(p);
3779 	new_cache = __find_general_cachep(new_size, flags);
3780 
3781 	/*
3782 	 * If the new size fits in the current cache, bail out.
3783 	 */
3784 	if (likely(cache == new_cache))
3785 		return (void *)p;
3786 
3787 	/*
3788 	 * We are on the slow path here, so do not use __cache_alloc
3789 	 * because it bloats the kernel text.
3790  	 */
3791 	ret = kmalloc_track_caller(new_size, flags);
3792 	if (ret) {
3793 		memcpy(ret, p, min(new_size, ksize(p)));
3794 		kfree(p);
3795 	}
3796 	return ret;
3797 }
3798 EXPORT_SYMBOL(krealloc);
3799 
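/*
 * Usage sketch ("tbl" and new_count are hypothetical):
 *
 *	new = krealloc(tbl->entries,
 *		       new_count * sizeof(*tbl->entries), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	tbl->entries = new;
 *
 * On failure the original allocation is left untouched, so the old
 * pointer must not be overwritten with the return value before it has
 * been checked.
 */
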
3800 /**
3801  * kmem_cache_free - Deallocate an object
3802  * @cachep: The cache the allocation was from.
3803  * @objp: The previously allocated object.
3804  *
3805  * Free an object which was previously allocated from this
3806  * cache.
3807  */
3808 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3809 {
3810 	unsigned long flags;
3811 
3812 	BUG_ON(virt_to_cache(objp) != cachep);
3813 
3814 	local_irq_save(flags);
3815 	debug_check_no_locks_freed(objp, obj_size(cachep));
3816 	__cache_free(cachep, objp);
3817 	local_irq_restore(flags);
3818 }
3819 EXPORT_SYMBOL(kmem_cache_free);
3820 
3821 /**
3822  * kfree - free previously allocated memory
3823  * @objp: pointer returned by kmalloc.
3824  *
3825  * If @objp is NULL, no operation is performed.
3826  *
3827  * Don't free memory not originally allocated by kmalloc()
3828  * or you will run into trouble.
3829  */
3830 void kfree(const void *objp)
3831 {
3832 	struct kmem_cache *c;
3833 	unsigned long flags;
3834 
3835 	if (unlikely(!objp))
3836 		return;
3837 	local_irq_save(flags);
3838 	kfree_debugcheck(objp);
3839 	c = virt_to_cache(objp);
3840 	debug_check_no_locks_freed(objp, obj_size(c));
3841 	__cache_free(c, (void *)objp);
3842 	local_irq_restore(flags);
3843 }
3844 EXPORT_SYMBOL(kfree);
3845 
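/*
 * Illustrative pairing (sizes hypothetical): kmalloc() picks the
 * smallest general cache that fits, so on a typical configuration
 *
 *	buf = kmalloc(17, GFP_KERNEL);
 *
 * is served from the size-32 general cache and must be returned with
 * kfree(buf).
 */
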
3846 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3847 {
3848 	return obj_size(cachep);
3849 }
3850 EXPORT_SYMBOL(kmem_cache_size);
3851 
3852 const char *kmem_cache_name(struct kmem_cache *cachep)
3853 {
3854 	return cachep->name;
3855 }
3856 EXPORT_SYMBOL_GPL(kmem_cache_name);
3857 
3858 /*
3859  * This initializes kmem_list3 or resizes various caches for all nodes.
3860  */
3861 static int alloc_kmemlist(struct kmem_cache *cachep)
3862 {
3863 	int node;
3864 	struct kmem_list3 *l3;
3865 	struct array_cache *new_shared;
3866 	struct array_cache **new_alien = NULL;
3867 
3868 	for_each_online_node(node) {
3869 
3870 		if (use_alien_caches) {
3871 			new_alien = alloc_alien_cache(node, cachep->limit);
3872 			if (!new_alien)
3873 				goto fail;
3874 		}
3875 
3876 		new_shared = NULL;
3877 		if (cachep->shared) {
3878 			new_shared = alloc_arraycache(node,
3879 				cachep->shared*cachep->batchcount,
3880 					0xbaadf00d);
3881 			if (!new_shared) {
3882 				free_alien_cache(new_alien);
3883 				goto fail;
3884 			}
3885 		}
3886 
3887 		l3 = cachep->nodelists[node];
3888 		if (l3) {
3889 			struct array_cache *shared = l3->shared;
3890 
3891 			spin_lock_irq(&l3->list_lock);
3892 
3893 			if (shared)
3894 				free_block(cachep, shared->entry,
3895 						shared->avail, node);
3896 
3897 			l3->shared = new_shared;
3898 			if (!l3->alien) {
3899 				l3->alien = new_alien;
3900 				new_alien = NULL;
3901 			}
3902 			l3->free_limit = (1 + nr_cpus_node(node)) *
3903 					cachep->batchcount + cachep->num;
3904 			spin_unlock_irq(&l3->list_lock);
3905 			kfree(shared);
3906 			free_alien_cache(new_alien);
3907 			continue;
3908 		}
3909 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3910 		if (!l3) {
3911 			free_alien_cache(new_alien);
3912 			kfree(new_shared);
3913 			goto fail;
3914 		}
3915 
3916 		kmem_list3_init(l3);
3917 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3918 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3919 		l3->shared = new_shared;
3920 		l3->alien = new_alien;
3921 		l3->free_limit = (1 + nr_cpus_node(node)) *
3922 					cachep->batchcount + cachep->num;
3923 		cachep->nodelists[node] = l3;
3924 	}
3925 	return 0;
3926 
3927 fail:
3928 	if (!cachep->next.next) {
3929 		/* Cache is not active yet. Roll back what we did */
3930 		node--;
3931 		while (node >= 0) {
3932 			if (cachep->nodelists[node]) {
3933 				l3 = cachep->nodelists[node];
3934 
3935 				kfree(l3->shared);
3936 				free_alien_cache(l3->alien);
3937 				kfree(l3);
3938 				cachep->nodelists[node] = NULL;
3939 			}
3940 			node--;
3941 		}
3942 	}
3943 	return -ENOMEM;
3944 }
3945 
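/*
 * Example of the free_limit formula above (numbers hypothetical): on a
 * node with 4 cpus, batchcount == 60 and 30 objects per slab, the node
 * may hold up to (1 + 4) * 60 + 30 == 330 free objects before
 * free_block() starts destroying empty slabs.
 */
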
3946 struct ccupdate_struct {
3947 	struct kmem_cache *cachep;
3948 	struct array_cache *new[NR_CPUS];
3949 };
3950 
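/*
 * Swap each cpu's array_cache with its preallocated replacement.
 * Runs on every cpu via on_each_cpu() with interrupts off, so a cpu
 * only ever touches its own slot; the old array is handed back in
 * new->new[smp_processor_id()] for the caller to drain and free.
 */
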
3951 static void do_ccupdate_local(void *info)
3952 {
3953 	struct ccupdate_struct *new = info;
3954 	struct array_cache *old;
3955 
3956 	check_irq_off();
3957 	old = cpu_cache_get(new->cachep);
3958 
3959 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3960 	new->new[smp_processor_id()] = old;
3961 }
3962 
3963 /* Always called with the cache_chain_mutex held */
3964 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3965 				int batchcount, int shared)
3966 {
3967 	struct ccupdate_struct *new;
3968 	int i;
3969 
3970 	new = kzalloc(sizeof(*new), GFP_KERNEL);
3971 	if (!new)
3972 		return -ENOMEM;
3973 
3974 	for_each_online_cpu(i) {
3975 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
3976 						batchcount);
3977 		if (!new->new[i]) {
3978 			for (i--; i >= 0; i--)
3979 				kfree(new->new[i]);
3980 			kfree(new);
3981 			return -ENOMEM;
3982 		}
3983 	}
3984 	new->cachep = cachep;
3985 
3986 	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
3987 
3988 	check_irq_on();
3989 	cachep->batchcount = batchcount;
3990 	cachep->limit = limit;
3991 	cachep->shared = shared;
3992 
3993 	for_each_online_cpu(i) {
3994 		struct array_cache *ccold = new->new[i];
3995 		if (!ccold)
3996 			continue;
3997 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3998 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3999 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
4000 		kfree(ccold);
4001 	}
4002 	kfree(new);
4003 	return alloc_kmemlist(cachep);
4004 }
4005 
4006 /* Always called with the cache_chain_mutex held */
4007 static int enable_cpucache(struct kmem_cache *cachep)
4008 {
4009 	int err;
4010 	int limit, shared;
4011 
4012 	/*
4013 	 * The head array serves three purposes:
4014 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4015 	 * - reduce the number of spinlock operations.
4016 	 * - reduce the number of linked list operations on the slab and
4017 	 *   bufctl chains: array operations are cheaper.
4018 	 * The numbers are guessed; we should auto-tune them as described by
4019 	 * Bonwick.
4020 	 */
4021 	if (cachep->buffer_size > 131072)
4022 		limit = 1;
4023 	else if (cachep->buffer_size > PAGE_SIZE)
4024 		limit = 8;
4025 	else if (cachep->buffer_size > 1024)
4026 		limit = 24;
4027 	else if (cachep->buffer_size > 256)
4028 		limit = 54;
4029 	else
4030 		limit = 120;
4031 
4032 	/*
4033 	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
4034 	 * allocation behaviour: most allocs on one cpu, most free operations
4035 	 * on another cpu. For such cases, efficient object passing between
4036 	 * cpus is necessary. This is provided by a shared array. The array
4037 	 * replaces Bonwick's magazine layer.
4038 	 * On uniprocessor, it's functionally equivalent (but less efficient)
4039 	 * to a larger limit. Thus disabled by default.
4040 	 */
4041 	shared = 0;
4042 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4043 		shared = 8;
4044 
4045 #if DEBUG
4046 	/*
4047 	 * With debugging enabled, a large batchcount leads to excessively
4048 	 * long periods with local interrupts disabled. Limit the batchcount.
4049 	 */
4050 	if (limit > 32)
4051 		limit = 32;
4052 #endif
4053 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
4054 	if (err)
4055 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4056 		       cachep->name, -err);
4057 	return err;
4058 }
4059 
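/*
 * Worked example of the table above: a cache with buffer_size == 512
 * gets limit = 54 and batchcount = (54 + 1) / 2 = 27; on SMP, caches
 * with objects of at most PAGE_SIZE also get shared = 8 (the per-node
 * shared array then holds shared * batchcount entries).
 */
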
4060 /*
4061  * Drain an array if it contains any elements, taking the l3 lock only if
4062  * necessary. Note that the l3 listlock also protects the array_cache
4063  * if drain_array() is used on the shared array.
4064  */
4065 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4066 			 struct array_cache *ac, int force, int node)
4067 {
4068 	int tofree;
4069 
4070 	if (!ac || !ac->avail)
4071 		return;
4072 	if (ac->touched && !force) {
4073 		ac->touched = 0;
4074 	} else {
4075 		spin_lock_irq(&l3->list_lock);
4076 		if (ac->avail) {
4077 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4078 			if (tofree > ac->avail)
4079 				tofree = (ac->avail + 1) / 2;
4080 			free_block(cachep, ac->entry, tofree, node);
4081 			ac->avail -= tofree;
4082 			memmove(ac->entry, &(ac->entry[tofree]),
4083 				sizeof(void *) * ac->avail);
4084 		}
4085 		spin_unlock_irq(&l3->list_lock);
4086 	}
4087 }
4088 
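/*
 * Example of the drain heuristic: with ac->limit == 120 and force == 0,
 * at most (120 + 4) / 5 == 24 objects go back per pass, capped at
 * (ac->avail + 1) / 2 when fewer are available, so busy arrays are
 * drained gradually rather than emptied in one go.
 */
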
4089 /**
4090  * cache_reap - Reclaim memory from caches.
4091  * @w: work descriptor
4092  *
4093  * Called from workqueue/eventd every few seconds.
4094  * Purpose:
4095  * - clear the per-cpu caches for this CPU.
4096  * - return freeable pages to the main free memory pool.
4097  *
4098  * If we cannot acquire the cache chain mutex then just give up - we'll try
4099  * again on the next iteration.
4100  */
4101 static void cache_reap(struct work_struct *w)
4102 {
4103 	struct kmem_cache *searchp;
4104 	struct kmem_list3 *l3;
4105 	int node = numa_node_id();
4106 	struct delayed_work *work =
4107 		container_of(w, struct delayed_work, work);
4108 
4109 	if (!mutex_trylock(&cache_chain_mutex))
4110 		/* Give up. Set up the next iteration. */
4111 		goto out;
4112 
4113 	list_for_each_entry(searchp, &cache_chain, next) {
4114 		check_irq_on();
4115 
4116 		/*
4117 		 * We only take the l3 lock if absolutely necessary, and we
4118 		 * have established with reasonable certainty that
4119 		 * some work can be done if the lock is obtained.
4120 		 */
4121 		l3 = searchp->nodelists[node];
4122 
4123 		reap_alien(searchp, l3);
4124 
4125 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4126 
4127 		/*
4128 		 * These are racy checks, but it does not matter
4129 		 * if we skip one check or scan twice.
4130 		 */
4131 		if (time_after(l3->next_reap, jiffies))
4132 			goto next;
4133 
4134 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4135 
4136 		drain_array(searchp, l3, l3->shared, 0, node);
4137 
4138 		if (l3->free_touched)
4139 			l3->free_touched = 0;
4140 		else {
4141 			int freed;
4142 
4143 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4144 				5 * searchp->num - 1) / (5 * searchp->num));
4145 			STATS_ADD_REAPED(searchp, freed);
4146 		}
4147 next:
4148 		cond_resched();
4149 	}
4150 	check_irq_on();
4151 	mutex_unlock(&cache_chain_mutex);
4152 	next_reap_node();
4153 out:
4154 	/* Set up the next iteration */
4155 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4156 }
4157 
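/*
 * Scheduling note: each cpu reschedules its own reap work, and
 * round_jiffies_relative() rounds the expiry so the per-cpu timers
 * tend to fire in the same jiffy, batching wakeups. A failed
 * mutex_trylock() simply defers all work to the next REAPTIMEOUT_CPUC
 * interval.
 */
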
4158 #ifdef CONFIG_PROC_FS
4159 
4160 static void print_slabinfo_header(struct seq_file *m)
4161 {
4162 	/*
4163 	 * Output format version, so at least we can change it
4164 	 * without _too_ many complaints.
4165 	 */
4166 #if STATS
4167 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4168 #else
4169 	seq_puts(m, "slabinfo - version: 2.1\n");
4170 #endif
4171 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4172 		 "<objperslab> <pagesperslab>");
4173 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4174 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4175 #if STATS
4176 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4177 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4178 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4179 #endif
4180 	seq_putc(m, '\n');
4181 }
4182 
4183 static void *s_start(struct seq_file *m, loff_t *pos)
4184 {
4185 	loff_t n = *pos;
4186 	struct list_head *p;
4187 
4188 	mutex_lock(&cache_chain_mutex);
4189 	if (!n)
4190 		print_slabinfo_header(m);
4191 	p = cache_chain.next;
4192 	while (n--) {
4193 		p = p->next;
4194 		if (p == &cache_chain)
4195 			return NULL;
4196 	}
4197 	return list_entry(p, struct kmem_cache, next);
4198 }
4199 
4200 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4201 {
4202 	struct kmem_cache *cachep = p;
4203 	++*pos;
4204 	return cachep->next.next == &cache_chain ?
4205 		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
4206 }
4207 
4208 static void s_stop(struct seq_file *m, void *p)
4209 {
4210 	mutex_unlock(&cache_chain_mutex);
4211 }
4212 
4213 static int s_show(struct seq_file *m, void *p)
4214 {
4215 	struct kmem_cache *cachep = p;
4216 	struct slab *slabp;
4217 	unsigned long active_objs;
4218 	unsigned long num_objs;
4219 	unsigned long active_slabs = 0;
4220 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4221 	const char *name;
4222 	char *error = NULL;
4223 	int node;
4224 	struct kmem_list3 *l3;
4225 
4226 	active_objs = 0;
4227 	num_slabs = 0;
4228 	for_each_online_node(node) {
4229 		l3 = cachep->nodelists[node];
4230 		if (!l3)
4231 			continue;
4232 
4233 		check_irq_on();
4234 		spin_lock_irq(&l3->list_lock);
4235 
4236 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4237 			if (slabp->inuse != cachep->num && !error)
4238 				error = "slabs_full accounting error";
4239 			active_objs += cachep->num;
4240 			active_slabs++;
4241 		}
4242 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4243 			if (slabp->inuse == cachep->num && !error)
4244 				error = "slabs_partial inuse accounting error";
4245 			if (!slabp->inuse && !error)
4246 				error = "slabs_partial/inuse accounting error";
4247 			active_objs += slabp->inuse;
4248 			active_slabs++;
4249 		}
4250 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4251 			if (slabp->inuse && !error)
4252 				error = "slabs_free/inuse accounting error";
4253 			num_slabs++;
4254 		}
4255 		free_objects += l3->free_objects;
4256 		if (l3->shared)
4257 			shared_avail += l3->shared->avail;
4258 
4259 		spin_unlock_irq(&l3->list_lock);
4260 	}
4261 	num_slabs += active_slabs;
4262 	num_objs = num_slabs * cachep->num;
4263 	if (num_objs - active_objs != free_objects && !error)
4264 		error = "free_objects accounting error";
4265 
4266 	name = cachep->name;
4267 	if (error)
4268 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4269 
4270 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4271 		   name, active_objs, num_objs, cachep->buffer_size,
4272 		   cachep->num, (1 << cachep->gfporder));
4273 	seq_printf(m, " : tunables %4u %4u %4u",
4274 		   cachep->limit, cachep->batchcount, cachep->shared);
4275 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4276 		   active_slabs, num_slabs, shared_avail);
4277 #if STATS
4278 	{			/* list3 stats */
4279 		unsigned long high = cachep->high_mark;
4280 		unsigned long allocs = cachep->num_allocations;
4281 		unsigned long grown = cachep->grown;
4282 		unsigned long reaped = cachep->reaped;
4283 		unsigned long errors = cachep->errors;
4284 		unsigned long max_freeable = cachep->max_freeable;
4285 		unsigned long node_allocs = cachep->node_allocs;
4286 		unsigned long node_frees = cachep->node_frees;
4287 		unsigned long overflows = cachep->node_overflow;
4288 
4289 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4290 				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
4291 				reaped, errors, max_freeable, node_allocs,
4292 				node_frees, overflows);
4293 	}
4294 	/* cpu stats */
4295 	{
4296 		unsigned long allochit = atomic_read(&cachep->allochit);
4297 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4298 		unsigned long freehit = atomic_read(&cachep->freehit);
4299 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4300 
4301 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4302 			   allochit, allocmiss, freehit, freemiss);
4303 	}
4304 #endif
4305 	seq_putc(m, '\n');
4306 	return 0;
4307 }
4308 
4309 /*
4310  * slabinfo_op - iterator that generates /proc/slabinfo
4311  *
4312  * Output layout:
4313  * cache-name
4314  * num-active-objs
4315  * total-objs
4316  * object size
4317  * num-active-slabs
4318  * total-slabs
4319  * num-pages-per-slab
4320  * + further values on SMP and with statistics enabled
4321  */
4322 
4323 const struct seq_operations slabinfo_op = {
4324 	.start = s_start,
4325 	.next = s_next,
4326 	.stop = s_stop,
4327 	.show = s_show,
4328 };
4329 
4330 #define MAX_SLABINFO_WRITE 128
4331 /**
4332  * slabinfo_write - Tuning for the slab allocator
4333  * @file: unused
4334  * @buffer: user buffer
4335  * @count: data length
4336  * @ppos: unused
4337  */
4338 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4339 		       size_t count, loff_t *ppos)
4340 {
4341 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4342 	int limit, batchcount, shared, res;
4343 	struct kmem_cache *cachep;
4344 
4345 	if (count > MAX_SLABINFO_WRITE)
4346 		return -EINVAL;
4347 	if (copy_from_user(&kbuf, buffer, count))
4348 		return -EFAULT;
4349 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4350 
4351 	tmp = strchr(kbuf, ' ');
4352 	if (!tmp)
4353 		return -EINVAL;
4354 	*tmp = '\0';
4355 	tmp++;
4356 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4357 		return -EINVAL;
4358 
4359 	/* Find the cache in the chain of caches. */
4360 	mutex_lock(&cache_chain_mutex);
4361 	res = -EINVAL;
4362 	list_for_each_entry(cachep, &cache_chain, next) {
4363 		if (!strcmp(cachep->name, kbuf)) {
4364 			if (limit < 1 || batchcount < 1 ||
4365 					batchcount > limit || shared < 0) {
4366 				res = 0;
4367 			} else {
4368 				res = do_tune_cpucache(cachep, limit,
4369 						       batchcount, shared);
4370 			}
4371 			break;
4372 		}
4373 	}
4374 	mutex_unlock(&cache_chain_mutex);
4375 	if (res >= 0)
4376 		res = count;
4377 	return res;
4378 }
4379 
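/*
 * Tuning example (assuming a cache named "dentry" exists): writing
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * sets limit = 120, batchcount = 60 and shared = 8 for that cache via
 * do_tune_cpucache().
 */
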
4380 #ifdef CONFIG_DEBUG_SLAB_LEAK
4381 
4382 static void *leaks_start(struct seq_file *m, loff_t *pos)
4383 {
4384 	loff_t n = *pos;
4385 	struct list_head *p;
4386 
4387 	mutex_lock(&cache_chain_mutex);
4388 	p = cache_chain.next;
4389 	while (n--) {
4390 		p = p->next;
4391 		if (p == &cache_chain)
4392 			return NULL;
4393 	}
4394 	return list_entry(p, struct kmem_cache, next);
4395 }
4396 
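/*
 * The leak table 'n' is laid out as n[0] = capacity (in entries),
 * n[1] = entries in use, followed by n[1] sorted (caller, count)
 * pairs. add_caller() binary-searches for the caller address and
 * either bumps its count or shifts the tail up to insert a new pair;
 * it returns 0 once the table is full so leaks_show() can regrow the
 * buffer.
 */
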
4397 static inline int add_caller(unsigned long *n, unsigned long v)
4398 {
4399 	unsigned long *p;
4400 	int l;
4401 	if (!v)
4402 		return 1;
4403 	l = n[1];
4404 	p = n + 2;
4405 	while (l) {
4406 		int i = l/2;
4407 		unsigned long *q = p + 2 * i;
4408 		if (*q == v) {
4409 			q[1]++;
4410 			return 1;
4411 		}
4412 		if (*q > v) {
4413 			l = i;
4414 		} else {
4415 			p = q + 2;
4416 			l -= i + 1;
4417 		}
4418 	}
4419 	if (++n[1] == n[0])
4420 		return 0;
4421 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4422 	p[0] = v;
4423 	p[1] = 1;
4424 	return 1;
4425 }
4426 
4427 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4428 {
4429 	void *p;
4430 	int i;
4431 	if (n[0] == n[1])
4432 		return;
4433 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4434 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4435 			continue;
4436 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4437 			return;
4438 	}
4439 }
4440 
4441 static void show_symbol(struct seq_file *m, unsigned long address)
4442 {
4443 #ifdef CONFIG_KALLSYMS
4444 	unsigned long offset, size;
4445 	char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
4446 
4447 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4448 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4449 		if (modname[0])
4450 			seq_printf(m, " [%s]", modname);
4451 		return;
4452 	}
4453 #endif
4454 	seq_printf(m, "%p", (void *)address);
4455 }
4456 
4457 static int leaks_show(struct seq_file *m, void *p)
4458 {
4459 	struct kmem_cache *cachep = p;
4460 	struct slab *slabp;
4461 	struct kmem_list3 *l3;
4462 	const char *name;
4463 	unsigned long *n = m->private;
4464 	int node;
4465 	int i;
4466 
4467 	if (!(cachep->flags & SLAB_STORE_USER))
4468 		return 0;
4469 	if (!(cachep->flags & SLAB_RED_ZONE))
4470 		return 0;
4471 
4472 	/* OK, we can do it */
4473 
4474 	n[1] = 0;
4475 
4476 	for_each_online_node(node) {
4477 		l3 = cachep->nodelists[node];
4478 		if (!l3)
4479 			continue;
4480 
4481 		check_irq_on();
4482 		spin_lock_irq(&l3->list_lock);
4483 
4484 		list_for_each_entry(slabp, &l3->slabs_full, list)
4485 			handle_slab(n, cachep, slabp);
4486 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4487 			handle_slab(n, cachep, slabp);
4488 		spin_unlock_irq(&l3->list_lock);
4489 	}
4490 	name = cachep->name;
4491 	if (n[0] == n[1]) {
4492 		/* Increase the buffer size */
4493 		mutex_unlock(&cache_chain_mutex);
4494 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4495 		if (!m->private) {
4496 			/* Too bad, we are really out */
4497 			m->private = n;
4498 			mutex_lock(&cache_chain_mutex);
4499 			return -ENOMEM;
4500 		}
4501 		*(unsigned long *)m->private = n[0] * 2;
4502 		kfree(n);
4503 		mutex_lock(&cache_chain_mutex);
4504 		/* Now make sure this entry will be retried */
4505 		m->count = m->size;
4506 		return 0;
4507 	}
4508 	for (i = 0; i < n[1]; i++) {
4509 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4510 		show_symbol(m, n[2*i+2]);
4511 		seq_putc(m, '\n');
4512 	}
4513 
4514 	return 0;
4515 }
4516 
4517 const struct seq_operations slabstats_op = {
4518 	.start = leaks_start,
4519 	.next = s_next,
4520 	.stop = s_stop,
4521 	.show = leaks_show,
4522 };
4523 #endif
4524 #endif
4525 
4526 /**
4527  * ksize - get the actual amount of memory allocated for a given object
4528  * @objp: Pointer to the object
4529  *
4530  * kmalloc may internally round up allocations and return more memory
4531  * than requested. ksize() can be used to determine the actual amount of
4532  * memory allocated. The caller may use this additional memory, even though
4533  * a smaller amount of memory was initially specified with the kmalloc call.
4534  * The caller must guarantee that objp points to a valid object previously
4535  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4536  * must not be freed during the duration of the call.
4537  */
4538 size_t ksize(const void *objp)
4539 {
4540 	if (unlikely(objp == NULL))
4541 		return 0;
4542 
4543 	return obj_size(virt_to_cache(objp));
4544 }
4545
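/*
 * Usage sketch (len and buf are hypothetical):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		avail = ksize(buf);
 *
 * avail is then >= len, and the whole rounded-up area may be used.
 */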