xref: /openbmc/linux/mm/slab.c (revision 1da177e4)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (which are small, usually one
25  * page long, and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a
34  * new cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in kmem_cache_t and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  */
79 
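/*
 * Illustrative example of the constructor rule above (hedged; the 'foo'
 * names are hypothetical, not part of this file): if foo_ctor() leaves a
 * foo's list head initialized, every object handed out by
 * kmem_cache_alloc() may assume that state, and the object must be
 * returned to it (e.g. the list head emptied again) before
 * kmem_cache_free().
 */
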
80 #include	<linux/config.h>
81 #include	<linux/slab.h>
82 #include	<linux/mm.h>
83 #include	<linux/swap.h>
84 #include	<linux/cache.h>
85 #include	<linux/interrupt.h>
86 #include	<linux/init.h>
87 #include	<linux/compiler.h>
88 #include	<linux/seq_file.h>
89 #include	<linux/notifier.h>
90 #include	<linux/kallsyms.h>
91 #include	<linux/cpu.h>
92 #include	<linux/sysctl.h>
93 #include	<linux/module.h>
94 #include	<linux/rcupdate.h>
95 
96 #include	<asm/uaccess.h>
97 #include	<asm/cacheflush.h>
98 #include	<asm/tlbflush.h>
99 #include	<asm/page.h>
100 
101 /*
102  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_DEBUG_INITIAL,
103  *		  SLAB_RED_ZONE & SLAB_POISON.
104  *		  0 for faster, smaller code (especially in the critical paths).
105  *
106  * STATS	- 1 to collect stats for /proc/slabinfo.
107  *		  0 for faster, smaller code (especially in the critical paths).
108  *
109  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
110  */
111 
112 #ifdef CONFIG_DEBUG_SLAB
113 #define	DEBUG		1
114 #define	STATS		1
115 #define	FORCED_DEBUG	1
116 #else
117 #define	DEBUG		0
118 #define	STATS		0
119 #define	FORCED_DEBUG	0
120 #endif
121 
122 
123 /* Shouldn't this be in a header file somewhere? */
124 #define	BYTES_PER_WORD		sizeof(void *)
125 
126 #ifndef cache_line_size
127 #define cache_line_size()	L1_CACHE_BYTES
128 #endif
129 
130 #ifndef ARCH_KMALLOC_MINALIGN
131 /*
132  * Enforce a minimum alignment for the kmalloc caches.
133  * Usually, the kmalloc caches are cache_line_size() aligned, except when
134  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
135  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
136  * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
137  * Note that this flag disables some debug features.
138  */
139 #define ARCH_KMALLOC_MINALIGN 0
140 #endif
141 
142 #ifndef ARCH_SLAB_MINALIGN
143 /*
144  * Enforce a minimum alignment for all caches.
145  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
146  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
147  * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
148  * some debug features.
149  */
150 #define ARCH_SLAB_MINALIGN 0
151 #endif
152 
153 #ifndef ARCH_KMALLOC_FLAGS
154 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
155 #endif
156 
157 /* Legal flag mask for kmem_cache_create(). */
158 #if DEBUG
159 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
160 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
161 			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
162 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
163 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
164 			 SLAB_DESTROY_BY_RCU)
165 #else
166 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
167 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
168 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
169 			 SLAB_DESTROY_BY_RCU)
170 #endif
171 
172 /*
173  * kmem_bufctl_t:
174  *
175  * Bufctls are used for linking objs within a slab; the free objects
176  * form a list linked through offsets.
177  *
178  * This implementation relies on "struct page" for locating the cache &
179  * slab an object belongs to.
180  * This allows the bufctl structure to be small (one int), but limits
181  * the number of objects a slab (not a cache) can contain when off-slab
182  * bufctls are used. The limit is the size of the largest general cache
183  * that does not use off-slab slabs.
184  * For 32-bit archs with 4 kB pages, this is 56.
185  * This is not serious, as it is only for large objects, when it is unwise
186  * to have too many per slab.
187  * Note: This limit can be raised by introducing a general cache whose size
188  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
189  */
190 
191 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
192 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
193 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
194 
195 /* Max number of objs-per-slab for caches which use off-slab slabs.
196  * Needed to avoid a possible looping condition in cache_grow().
197  */
198 static unsigned long offslab_limit;
199 
200 /*
201  * struct slab
202  *
203  * Manages the objs in a slab. Placed either at the beginning of mem allocated
204  * for a slab, or allocated from a general cache.
205  * Slabs are chained into three lists: fully used, partial, fully free slabs.
206  */
207 struct slab {
208 	struct list_head	list;
209 	unsigned long		colouroff;
210 	void			*s_mem;		/* including colour offset */
211 	unsigned int		inuse;		/* num of objs active in slab */
212 	kmem_bufctl_t		free;
213 };
214 
215 /*
216  * struct slab_rcu
217  *
218  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
219  * arrange for kmem_freepages to be called via RCU.  This is useful if
220  * we need to approach a kernel structure obliquely, from its address
221  * obtained without the usual locking.  We can lock the structure to
222  * stabilize it and check it's still at the given address, only if we
223  * can be sure that the memory has not been meanwhile reused for some
224  * other kind of object (which our subsystem's lock might corrupt).
225  *
226  * rcu_read_lock before reading the address, then rcu_read_unlock after
227  * taking the spinlock within the structure expected at that address.
228  *
229  * We assume struct slab_rcu can overlay struct slab when destroying.
230  */
231 struct slab_rcu {
232 	struct rcu_head		head;
233 	kmem_cache_t		*cachep;
234 	void			*addr;
235 };
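
/*
 * Sketch of the oblique-lookup pattern described above (an assumption
 * about a typical SLAB_DESTROY_BY_RCU user, not code from this file):
 *
 *	rcu_read_lock();
 *	obj = lookup_object_without_locks();
 *	spin_lock(&obj->lock);
 *	rcu_read_unlock();
 *	if (object_is_still_the_one_we_wanted(obj))
 *		... safe to use obj ...
 *	spin_unlock(&obj->lock);
 *
 * The RCU grace period guarantees that the memory is not reused for a
 * different kind of object while the read-side critical section runs.
 */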
236 
237 /*
238  * struct array_cache
239  *
240  * Per cpu structures
241  * Purpose:
242  * - LIFO ordering, to hand out cache-warm objects from _alloc
243  * - reduce the number of linked list operations
244  * - reduce spinlock operations
245  *
246  * The limit is stored in the per-cpu structure to reduce the data cache
247  * footprint.
248  *
249  */
250 struct array_cache {
251 	unsigned int avail;
252 	unsigned int limit;
253 	unsigned int batchcount;
254 	unsigned int touched;
255 };
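
/*
 * Note: the object pointers are stored directly after the struct, see
 * ac_entry() below. E.g. an array_cache with limit == 120 occupies
 * sizeof(struct array_cache) + 120*sizeof(void *) bytes, which is how
 * alloc_arraycache() sizes its allocation.
 */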
256 
257 /* bootstrap: The caches do not work without cpuarrays anymore,
258  * but the cpuarrays are allocated from the generic caches...
259  */
260 #define BOOT_CPUCACHE_ENTRIES	1
261 struct arraycache_init {
262 	struct array_cache cache;
263 	void * entries[BOOT_CPUCACHE_ENTRIES];
264 };
265 
266 /*
267  * The slab lists for all objects.
268  * Hopefully reduces internal fragmentation.
269  * NUMA: The spinlock could be moved from the kmem_cache_t
270  * into this structure, too. Figure out what causes
271  * fewer cross-node spinlock operations.
272  */
273 struct kmem_list3 {
274 	struct list_head	slabs_partial;	/* partial list first, better asm code */
275 	struct list_head	slabs_full;
276 	struct list_head	slabs_free;
277 	unsigned long	free_objects;
278 	int		free_touched;
279 	unsigned long	next_reap;
280 	struct array_cache	*shared;
281 };
282 
283 #define LIST3_INIT(parent) \
284 	{ \
285 		.slabs_full	= LIST_HEAD_INIT(parent.slabs_full), \
286 		.slabs_partial	= LIST_HEAD_INIT(parent.slabs_partial), \
287 		.slabs_free	= LIST_HEAD_INIT(parent.slabs_free) \
288 	}
289 #define list3_data(cachep) \
290 	(&(cachep)->lists)
291 
292 /* NUMA: per-node */
293 #define list3_data_ptr(cachep, ptr) \
294 		list3_data(cachep)
295 
296 /*
297  * kmem_cache_t
298  *
299  * manages a cache.
300  */
301 
302 struct kmem_cache_s {
303 /* 1) per-cpu data, touched during every alloc/free */
304 	struct array_cache	*array[NR_CPUS];
305 	unsigned int		batchcount;
306 	unsigned int		limit;
307 /* 2) touched by every alloc & free from the backend */
308 	struct kmem_list3	lists;
309 	/* NUMA: kmem_3list_t	*nodelists[MAX_NUMNODES] */
310 	unsigned int		objsize;
311 	unsigned int	 	flags;	/* constant flags */
312 	unsigned int		num;	/* # of objs per slab */
313 	unsigned int		free_limit; /* upper limit of objects in the lists */
314 	spinlock_t		spinlock;
315 
316 /* 3) cache_grow/shrink */
317 	/* order of pgs per slab (2^n) */
318 	unsigned int		gfporder;
319 
320 	/* force GFP flags, e.g. GFP_DMA */
321 	unsigned int		gfpflags;
322 
323 	size_t			colour;		/* cache colouring range */
324 	unsigned int		colour_off;	/* colour offset */
325 	unsigned int		colour_next;	/* cache colouring */
326 	kmem_cache_t		*slabp_cache;
327 	unsigned int		slab_size;
328 	unsigned int		dflags;		/* dynamic flags */
329 
330 	/* constructor func */
331 	void (*ctor)(void *, kmem_cache_t *, unsigned long);
332 
333 	/* de-constructor func */
334 	void (*dtor)(void *, kmem_cache_t *, unsigned long);
335 
336 /* 4) cache creation/removal */
337 	const char		*name;
338 	struct list_head	next;
339 
340 /* 5) statistics */
341 #if STATS
342 	unsigned long		num_active;
343 	unsigned long		num_allocations;
344 	unsigned long		high_mark;
345 	unsigned long		grown;
346 	unsigned long		reaped;
347 	unsigned long 		errors;
348 	unsigned long		max_freeable;
349 	unsigned long		node_allocs;
350 	atomic_t		allochit;
351 	atomic_t		allocmiss;
352 	atomic_t		freehit;
353 	atomic_t		freemiss;
354 #endif
355 #if DEBUG
356 	int			dbghead;
357 	int			reallen;
358 #endif
359 };
360 
361 #define CFLGS_OFF_SLAB		(0x80000000UL)
362 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
363 
364 #define BATCHREFILL_LIMIT	16
365 /* Optimization question: fewer reaps means less
366  * probability of unnecessary cpucache drain/refill cycles.
367  *
368  * OTOH the cpuarrays can contain lots of objects,
369  * which could lock up otherwise freeable slabs.
370  */
371 #define REAPTIMEOUT_CPUC	(2*HZ)
372 #define REAPTIMEOUT_LIST3	(4*HZ)
373 
374 #if STATS
375 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
376 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
377 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
378 #define	STATS_INC_GROWN(x)	((x)->grown++)
379 #define	STATS_INC_REAPED(x)	((x)->reaped++)
380 #define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
381 					(x)->high_mark = (x)->num_active; \
382 				} while (0)
383 #define	STATS_INC_ERR(x)	((x)->errors++)
384 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
385 #define	STATS_SET_FREEABLE(x, i) \
386 				do { if ((x)->max_freeable < i) \
387 					(x)->max_freeable = i; \
388 				} while (0)
389 
390 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
391 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
392 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
393 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
394 #else
395 #define	STATS_INC_ACTIVE(x)	do { } while (0)
396 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
397 #define	STATS_INC_ALLOCED(x)	do { } while (0)
398 #define	STATS_INC_GROWN(x)	do { } while (0)
399 #define	STATS_INC_REAPED(x)	do { } while (0)
400 #define	STATS_SET_HIGH(x)	do { } while (0)
401 #define	STATS_INC_ERR(x)	do { } while (0)
402 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
403 #define	STATS_SET_FREEABLE(x, i) \
404 				do { } while (0)
405 
406 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
407 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
408 #define STATS_INC_FREEHIT(x)	do { } while (0)
409 #define STATS_INC_FREEMISS(x)	do { } while (0)
410 #endif
411 
412 #if DEBUG
413 /* Magic nums for obj red zoning.
414  * Placed in the first word before and the first word after an obj.
415  */
416 #define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
417 #define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */
418 
419 /* ...and for poisoning */
420 #define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
421 #define POISON_FREE	0x6b	/* for use-after-free poisoning */
422 #define	POISON_END	0xa5	/* end-byte of poisoning */
423 
424 /* memory layout of objects:
425  * 0		: objp
426  * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
427  * 		the end of an object is aligned with the end of the real
428  * 		allocation. Catches writes behind the end of the allocation.
429  * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
430  * 		redzone word.
431  * cachep->dbghead: The real object.
432  * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
433  * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
434  */
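
/*
 * Worked example of the layout above (hedged: assumes a 32-bit arch,
 * BYTES_PER_WORD == 4, a 100-byte object with SLAB_RED_ZONE and
 * SLAB_STORE_USER set):
 *	dbghead == 4, objsize == 112
 *	bytes   0..  3: first redzone word
 *	bytes   4..103: the real object
 *	bytes 104..107: second redzone word
 *	bytes 108..111: last caller address
 */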
435 static int obj_dbghead(kmem_cache_t *cachep)
436 {
437 	return cachep->dbghead;
438 }
439 
440 static int obj_reallen(kmem_cache_t *cachep)
441 {
442 	return cachep->reallen;
443 }
444 
445 static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
446 {
447 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
448 	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
449 }
450 
451 static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
452 {
453 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
454 	if (cachep->flags & SLAB_STORE_USER)
455 		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
456 	return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
457 }
458 
459 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
460 {
461 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
462 	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
463 }
464 
465 #else
466 
467 #define obj_dbghead(x)			0
468 #define obj_reallen(cachep)		(cachep->objsize)
469 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
470 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
471 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
472 
473 #endif
474 
475 /*
476  * Maximum size of an obj (in 2^order pages)
477  * and absolute limit for the gfp order.
478  */
479 #if defined(CONFIG_LARGE_ALLOCS)
480 #define	MAX_OBJ_ORDER	13	/* up to 32Mb */
481 #define	MAX_GFP_ORDER	13	/* up to 32Mb */
482 #elif defined(CONFIG_MMU)
483 #define	MAX_OBJ_ORDER	5	/* 32 pages */
484 #define	MAX_GFP_ORDER	5	/* 32 pages */
485 #else
486 #define	MAX_OBJ_ORDER	8	/* up to 1Mb */
487 #define	MAX_GFP_ORDER	8	/* up to 1Mb */
488 #endif
489 
490 /*
491  * Do not go above this order unless 0 objects fit into the slab.
492  */
493 #define	BREAK_GFP_ORDER_HI	1
494 #define	BREAK_GFP_ORDER_LO	0
495 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
496 
497 /* Macros for storing/retrieving the cachep and/or slab from the
498  * global 'mem_map'. These are used to find the slab an obj belongs to.
499  * With kfree(), these are used to find the cache which an obj belongs to.
500  */
501 #define	SET_PAGE_CACHE(pg,x)  ((pg)->lru.next = (struct list_head *)(x))
502 #define	GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->lru.next)
503 #define	SET_PAGE_SLAB(pg,x)   ((pg)->lru.prev = (struct list_head *)(x))
504 #define	GET_PAGE_SLAB(pg)     ((struct slab *)(pg)->lru.prev)
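
/*
 * Sketch of how the free path uses these macros (illustrative only,
 * cf. kfree() and free_block() later in this file):
 *
 *	struct page *page = virt_to_page(objp);
 *	kmem_cache_t *cachep = GET_PAGE_CACHE(page);
 *	struct slab *slabp = GET_PAGE_SLAB(page);
 */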
505 
506 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
507 struct cache_sizes malloc_sizes[] = {
508 #define CACHE(x) { .cs_size = (x) },
509 #include <linux/kmalloc_sizes.h>
510 	CACHE(ULONG_MAX)
511 #undef CACHE
512 };
513 EXPORT_SYMBOL(malloc_sizes);
514 
515 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
516 struct cache_names {
517 	char *name;
518 	char *name_dma;
519 };
520 
521 static struct cache_names __initdata cache_names[] = {
522 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
523 #include <linux/kmalloc_sizes.h>
524 	{ NULL, }
525 #undef CACHE
526 };
527 
528 static struct arraycache_init initarray_cache __initdata =
529 	{ { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
530 static struct arraycache_init initarray_generic =
531 	{ { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
532 
533 /* internal cache of cache description objs */
534 static kmem_cache_t cache_cache = {
535 	.lists		= LIST3_INIT(cache_cache.lists),
536 	.batchcount	= 1,
537 	.limit		= BOOT_CPUCACHE_ENTRIES,
538 	.objsize	= sizeof(kmem_cache_t),
539 	.flags		= SLAB_NO_REAP,
540 	.spinlock	= SPIN_LOCK_UNLOCKED,
541 	.name		= "kmem_cache",
542 #if DEBUG
543 	.reallen	= sizeof(kmem_cache_t),
544 #endif
545 };
546 
547 /* Guard access to the cache-chain. */
548 static struct semaphore	cache_chain_sem;
549 static struct list_head cache_chain;
550 
551 /*
552  * vm_enough_memory() looks at this to determine how many
553  * slab-allocated pages are possibly freeable under pressure
554  *
555  * SLAB_RECLAIM_ACCOUNT turns this on per-slab
556  */
557 atomic_t slab_reclaim_pages;
558 EXPORT_SYMBOL(slab_reclaim_pages);
559 
560 /*
561  * chicken and egg problem: delay the per-cpu array allocation
562  * until the general caches are up.
563  */
564 static enum {
565 	NONE,
566 	PARTIAL,
567 	FULL
568 } g_cpucache_up;
569 
570 static DEFINE_PER_CPU(struct work_struct, reap_work);
571 
572 static void free_block(kmem_cache_t* cachep, void** objpp, int len);
573 static void enable_cpucache (kmem_cache_t *cachep);
574 static void cache_reap (void *unused);
575 
576 static inline void **ac_entry(struct array_cache *ac)
577 {
578 	return (void**)(ac+1);
579 }
580 
581 static inline struct array_cache *ac_data(kmem_cache_t *cachep)
582 {
583 	return cachep->array[smp_processor_id()];
584 }
585 
586 static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
587 {
588 	struct cache_sizes *csizep = malloc_sizes;
589 
590 #if DEBUG
591 	/* This happens if someone tries to call
592 	 * kmem_cache_create(), or __kmalloc(), before
593 	 * the generic caches are initialized.
594 	 */
595 	BUG_ON(csizep->cs_cachep == NULL);
596 #endif
597 	while (size > csizep->cs_size)
598 		csizep++;
599 
600 	/*
601 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
602 	 * has cs_{dma,}cachep==NULL. Thus no special case
603 	 * for large kmalloc calls required.
604 	 */
605 	if (unlikely(gfpflags & GFP_DMA))
606 		return csizep->cs_dmacachep;
607 	return csizep->cs_cachep;
608 }
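
/*
 * Example: a 100-byte request walks the table until cs_size >= 100, so
 * it is served from the size-128 general cache on typical configs (the
 * exact set of sizes comes from linux/kmalloc_sizes.h).
 */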
609 
610 /* Calculate the num of objs, wastage, and bytes left over for a given slab size. */
611 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
612 		 int flags, size_t *left_over, unsigned int *num)
613 {
614 	int i;
615 	size_t wastage = PAGE_SIZE<<gfporder;
616 	size_t extra = 0;
617 	size_t base = 0;
618 
619 	if (!(flags & CFLGS_OFF_SLAB)) {
620 		base = sizeof(struct slab);
621 		extra = sizeof(kmem_bufctl_t);
622 	}
623 	i = 0;
624 	while (i*size + ALIGN(base+i*extra, align) <= wastage)
625 		i++;
626 	if (i > 0)
627 		i--;
628 
629 	if (i > SLAB_LIMIT)
630 		i = SLAB_LIMIT;
631 
632 	*num = i;
633 	wastage -= i*size;
634 	wastage -= ALIGN(base+i*extra, align);
635 	*left_over = wastage;
636 }
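
/*
 * Worked example for cache_estimate() (hedged: assumes a 32-bit arch
 * with sizeof(struct slab) == 24 and 4-byte bufctls):
 * gfporder = 0 (4096 bytes), size = 128, align = 32, on-slab mgmt:
 *	i = 30: 30*128 + ALIGN(24 + 30*4, 32) = 3840 + 160 = 4000 <= 4096
 *	i = 31: 31*128 + ALIGN(24 + 31*4, 32) = 3968 + 160 = 4128  > 4096
 * so *num = 30 and *left_over = 4096 - 4000 = 96.
 */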
637 
638 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
639 
640 static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
641 {
642 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
643 		function, cachep->name, msg);
644 	dump_stack();
645 }
646 
647 /*
648  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
649  * via the workqueue/eventd.
650  * Add the CPU number into the expiration time to minimize the possibility of
651  * the CPUs getting into lockstep and contending for the global cache chain
652  * lock.
653  */
654 static void __devinit start_cpu_timer(int cpu)
655 {
656 	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
657 
658 	/*
659 	 * When this gets called from do_initcalls via cpucache_init(),
660 	 * init_workqueues() has already run, so keventd will be setup
661 	 * at that time.
662 	 */
663 	if (keventd_up() && reap_work->func == NULL) {
664 		INIT_WORK(reap_work, cache_reap, NULL);
665 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
666 	}
667 }
668 
669 static struct array_cache *alloc_arraycache(int cpu, int entries,
670 						int batchcount)
671 {
672 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
673 	struct array_cache *nc = NULL;
674 
675 	if (cpu != -1) {
676 		kmem_cache_t *cachep;
677 		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
678 		if (cachep)
679 			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
680 	}
681 	if (!nc)
682 		nc = kmalloc(memsize, GFP_KERNEL);
683 	if (nc) {
684 		nc->avail = 0;
685 		nc->limit = entries;
686 		nc->batchcount = batchcount;
687 		nc->touched = 0;
688 	}
689 	return nc;
690 }
691 
692 static int __devinit cpuup_callback(struct notifier_block *nfb,
693 				  unsigned long action, void *hcpu)
694 {
695 	long cpu = (long)hcpu;
696 	kmem_cache_t* cachep;
697 
698 	switch (action) {
699 	case CPU_UP_PREPARE:
700 		down(&cache_chain_sem);
701 		list_for_each_entry(cachep, &cache_chain, next) {
702 			struct array_cache *nc;
703 
704 			nc = alloc_arraycache(cpu, cachep->limit, cachep->batchcount);
705 			if (!nc)
706 				goto bad;
707 
708 			spin_lock_irq(&cachep->spinlock);
709 			cachep->array[cpu] = nc;
710 			cachep->free_limit = (1+num_online_cpus())*cachep->batchcount
711 						+ cachep->num;
712 			spin_unlock_irq(&cachep->spinlock);
713 
714 		}
715 		up(&cache_chain_sem);
716 		break;
717 	case CPU_ONLINE:
718 		start_cpu_timer(cpu);
719 		break;
720 #ifdef CONFIG_HOTPLUG_CPU
721 	case CPU_DEAD:
722 		/* fall thru */
723 	case CPU_UP_CANCELED:
724 		down(&cache_chain_sem);
725 
726 		list_for_each_entry(cachep, &cache_chain, next) {
727 			struct array_cache *nc;
728 
729 			spin_lock_irq(&cachep->spinlock);
730 			/* cpu is dead; no one can alloc from it. */
731 			nc = cachep->array[cpu];
732 			cachep->array[cpu] = NULL;
733 			cachep->free_limit -= cachep->batchcount;
734 			free_block(cachep, ac_entry(nc), nc->avail);
735 			spin_unlock_irq(&cachep->spinlock);
736 			kfree(nc);
737 		}
738 		up(&cache_chain_sem);
739 		break;
740 #endif
741 	}
742 	return NOTIFY_OK;
743 bad:
744 	up(&cache_chain_sem);
745 	return NOTIFY_BAD;
746 }
747 
748 static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
749 
750 /* Initialisation.
751  * Called after the gfp() functions have been enabled, and before smp_init().
752  */
753 void __init kmem_cache_init(void)
754 {
755 	size_t left_over;
756 	struct cache_sizes *sizes;
757 	struct cache_names *names;
758 
759 	/*
760 	 * Fragmentation resistance on low memory - only use bigger
761 	 * page orders on machines with more than 32MB of memory.
762 	 */
763 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
764 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
765 
766 
767 	/* Bootstrap is tricky, because several objects are allocated
768 	 * from caches that do not exist yet:
769 	 * 1) initialize the cache_cache cache: it contains the kmem_cache_t
770 	 *    structures of all caches, except cache_cache itself: cache_cache
771 	 *    is statically allocated.
772 	 *    Initially an __init data area is used for the head array, it's
773 	 *    replaced with a kmalloc allocated array at the end of the bootstrap.
774 	 * 2) Create the first kmalloc cache.
775 	 *    The kmem_cache_t for the new cache is allocated normally. An __init
776 	 *    data area is used for the head array.
777 	 * 3) Create the remaining kmalloc caches, with minimally sized head arrays.
778 	 * 4) Replace the __init data head arrays for cache_cache and the first
779 	 *    kmalloc cache with kmalloc allocated arrays.
780 	 * 5) Resize the head arrays of the kmalloc caches to their final sizes.
781 	 */
782 
783 	/* 1) create the cache_cache */
784 	init_MUTEX(&cache_chain_sem);
785 	INIT_LIST_HEAD(&cache_chain);
786 	list_add(&cache_cache.next, &cache_chain);
787 	cache_cache.colour_off = cache_line_size();
788 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
789 
790 	cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
791 
792 	cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
793 				&left_over, &cache_cache.num);
794 	if (!cache_cache.num)
795 		BUG();
796 
797 	cache_cache.colour = left_over/cache_cache.colour_off;
798 	cache_cache.colour_next = 0;
799 	cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
800 				sizeof(struct slab), cache_line_size());
801 
802 	/* 2+3) create the kmalloc caches */
803 	sizes = malloc_sizes;
804 	names = cache_names;
805 
806 	while (sizes->cs_size != ULONG_MAX) {
807 		/* For performance, all the general caches are L1 aligned.
808 		 * This should be particularly beneficial on SMP boxes, as it
809 		 * eliminates "false sharing".
810 		 * Note for systems short on memory removing the alignment will
811 		 * allow tighter packing of the smaller caches. */
812 		sizes->cs_cachep = kmem_cache_create(names->name,
813 			sizes->cs_size, ARCH_KMALLOC_MINALIGN,
814 			(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
815 
816 		/* Inc off-slab bufctl limit until the ceiling is hit. */
817 		if (!(OFF_SLAB(sizes->cs_cachep))) {
818 			offslab_limit = sizes->cs_size-sizeof(struct slab);
819 			offslab_limit /= sizeof(kmem_bufctl_t);
820 		}
821 
822 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
823 			sizes->cs_size, ARCH_KMALLOC_MINALIGN,
824 			(ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
825 			NULL, NULL);
826 
827 		sizes++;
828 		names++;
829 	}
830 	/* 4) Replace the bootstrap head arrays */
831 	{
832 		void * ptr;
833 
834 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
835 		local_irq_disable();
836 		BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
837 		memcpy(ptr, ac_data(&cache_cache), sizeof(struct arraycache_init));
838 		cache_cache.array[smp_processor_id()] = ptr;
839 		local_irq_enable();
840 
841 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
842 		local_irq_disable();
843 		BUG_ON(ac_data(malloc_sizes[0].cs_cachep) != &initarray_generic.cache);
844 		memcpy(ptr, ac_data(malloc_sizes[0].cs_cachep),
845 				sizeof(struct arraycache_init));
846 		malloc_sizes[0].cs_cachep->array[smp_processor_id()] = ptr;
847 		local_irq_enable();
848 	}
849 
850 	/* 5) resize the head arrays to their final sizes */
851 	{
852 		kmem_cache_t *cachep;
853 		down(&cache_chain_sem);
854 		list_for_each_entry(cachep, &cache_chain, next)
855 			enable_cpucache(cachep);
856 		up(&cache_chain_sem);
857 	}
858 
859 	/* Done! */
860 	g_cpucache_up = FULL;
861 
862 	/* Register a cpu startup notifier callback
863 	 * that initializes ac_data for all new cpus
864 	 */
865 	register_cpu_notifier(&cpucache_notifier);
866 
867 
868 	/* The reap timers are started later, with a module init call:
869 	 * That part of the kernel is not yet operational.
870 	 */
871 }
872 
873 static int __init cpucache_init(void)
874 {
875 	int cpu;
876 
877 	/*
878 	 * Register the timers that return unneeded
879 	 * pages to gfp.
880 	 */
881 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
882 		if (cpu_online(cpu))
883 			start_cpu_timer(cpu);
884 	}
885 
886 	return 0;
887 }
888 
889 __initcall(cpucache_init);
890 
891 /*
892  * Interface to system's page allocator. No need to hold the cache-lock.
893  *
894  * If we requested dmaable memory, we will get it. Even if we
895  * did not request dmaable memory, we might get it, but that
896  * would be relatively rare and ignorable.
897  */
898 static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
899 {
900 	struct page *page;
901 	void *addr;
902 	int i;
903 
904 	flags |= cachep->gfpflags;
905 	if (likely(nodeid == -1)) {
906 		page = alloc_pages(flags, cachep->gfporder);
907 	} else {
908 		page = alloc_pages_node(nodeid, flags, cachep->gfporder);
909 	}
910 	if (!page)
911 		return NULL;
912 	addr = page_address(page);
913 
914 	i = (1 << cachep->gfporder);
915 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
916 		atomic_add(i, &slab_reclaim_pages);
917 	add_page_state(nr_slab, i);
918 	while (i--) {
919 		SetPageSlab(page);
920 		page++;
921 	}
922 	return addr;
923 }
924 
925 /*
926  * Interface to system's page release.
927  */
928 static void kmem_freepages(kmem_cache_t *cachep, void *addr)
929 {
930 	unsigned long i = (1<<cachep->gfporder);
931 	struct page *page = virt_to_page(addr);
932 	const unsigned long nr_freed = i;
933 
934 	while (i--) {
935 		if (!TestClearPageSlab(page))
936 			BUG();
937 		page++;
938 	}
939 	sub_page_state(nr_slab, nr_freed);
940 	if (current->reclaim_state)
941 		current->reclaim_state->reclaimed_slab += nr_freed;
942 	free_pages((unsigned long)addr, cachep->gfporder);
943 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
944 		atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
945 }
946 
947 static void kmem_rcu_free(struct rcu_head *head)
948 {
949 	struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
950 	kmem_cache_t *cachep = slab_rcu->cachep;
951 
952 	kmem_freepages(cachep, slab_rcu->addr);
953 	if (OFF_SLAB(cachep))
954 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
955 }
956 
957 #if DEBUG
958 
959 #ifdef CONFIG_DEBUG_PAGEALLOC
960 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
961 				unsigned long caller)
962 {
963 	int size = obj_reallen(cachep);
964 
965 	addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
966 
967 	if (size < 5*sizeof(unsigned long))
968 		return;
969 
970 	*addr++=0x12345678;
971 	*addr++=caller;
972 	*addr++=smp_processor_id();
973 	size -= 3*sizeof(unsigned long);
974 	{
975 		unsigned long *sptr = &caller;
976 		unsigned long svalue;
977 
978 		while (!kstack_end(sptr)) {
979 			svalue = *sptr++;
980 			if (kernel_text_address(svalue)) {
981 				*addr++=svalue;
982 				size -= sizeof(unsigned long);
983 				if (size <= sizeof(unsigned long))
984 					break;
985 			}
986 		}
987 
988 	}
989 	*addr++=0x87654321;
990 }
991 #endif
992 
993 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
994 {
995 	int size = obj_reallen(cachep);
996 	addr = &((char*)addr)[obj_dbghead(cachep)];
997 
998 	memset(addr, val, size);
999 	*(unsigned char *)(addr+size-1) = POISON_END;
1000 }
1001 
1002 static void dump_line(char *data, int offset, int limit)
1003 {
1004 	int i;
1005 	printk(KERN_ERR "%03x:", offset);
1006 	for (i=0;i<limit;i++) {
1007 		printk(" %02x", (unsigned char)data[offset+i]);
1008 	}
1009 	printk("\n");
1010 }
1011 #endif
1012 
1013 #if DEBUG
1014 
1015 static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
1016 {
1017 	int i, size;
1018 	char *realobj;
1019 
1020 	if (cachep->flags & SLAB_RED_ZONE) {
1021 		printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
1022 			*dbg_redzone1(cachep, objp),
1023 			*dbg_redzone2(cachep, objp));
1024 	}
1025 
1026 	if (cachep->flags & SLAB_STORE_USER) {
1027 		printk(KERN_ERR "Last user: [<%p>]",
1028 				*dbg_userword(cachep, objp));
1029 		print_symbol("(%s)",
1030 				(unsigned long)*dbg_userword(cachep, objp));
1031 		printk("\n");
1032 	}
1033 	realobj = (char*)objp+obj_dbghead(cachep);
1034 	size = obj_reallen(cachep);
1035 	for (i=0; i<size && lines;i+=16, lines--) {
1036 		int limit;
1037 		limit = 16;
1038 		if (i+limit > size)
1039 			limit = size-i;
1040 		dump_line(realobj, i, limit);
1041 	}
1042 }
1043 
1044 static void check_poison_obj(kmem_cache_t *cachep, void *objp)
1045 {
1046 	char *realobj;
1047 	int size, i;
1048 	int lines = 0;
1049 
1050 	realobj = (char*)objp+obj_dbghead(cachep);
1051 	size = obj_reallen(cachep);
1052 
1053 	for (i=0;i<size;i++) {
1054 		char exp = POISON_FREE;
1055 		if (i == size-1)
1056 			exp = POISON_END;
1057 		if (realobj[i] != exp) {
1058 			int limit;
1059 			/* Mismatch ! */
1060 			/* Print header */
1061 			if (lines == 0) {
1062 				printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
1063 						realobj, size);
1064 				print_objinfo(cachep, objp, 0);
1065 			}
1066 			/* Hexdump the affected line */
1067 			i = (i/16)*16;
1068 			limit = 16;
1069 			if (i+limit > size)
1070 				limit = size-i;
1071 			dump_line(realobj, i, limit);
1072 			i += 16;
1073 			lines++;
1074 			/* Limit to 5 lines */
1075 			if (lines > 5)
1076 				break;
1077 		}
1078 	}
1079 	if (lines != 0) {
1080 		/* Print some data about the neighboring objects, if they
1081 		 * exist:
1082 		 */
1083 		struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
1084 		int objnr;
1085 
1086 		objnr = (objp-slabp->s_mem)/cachep->objsize;
1087 		if (objnr) {
1088 			objp = slabp->s_mem+(objnr-1)*cachep->objsize;
1089 			realobj = (char*)objp+obj_dbghead(cachep);
1090 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1091 						realobj, size);
1092 			print_objinfo(cachep, objp, 2);
1093 		}
1094 		if (objnr+1 < cachep->num) {
1095 			objp = slabp->s_mem+(objnr+1)*cachep->objsize;
1096 			realobj = (char*)objp+obj_dbghead(cachep);
1097 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1098 						realobj, size);
1099 			print_objinfo(cachep, objp, 2);
1100 		}
1101 	}
1102 }
1103 #endif
1104 
1105 /* Destroy all the objs in a slab, and release the mem back to the system.
1106  * Before calling, the slab must have been unlinked from the cache.
1107  * The cache-lock is not held/needed.
1108  */
1109 static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
1110 {
1111 	void *addr = slabp->s_mem - slabp->colouroff;
1112 
1113 #if DEBUG
1114 	int i;
1115 	for (i = 0; i < cachep->num; i++) {
1116 		void *objp = slabp->s_mem + cachep->objsize * i;
1117 
1118 		if (cachep->flags & SLAB_POISON) {
1119 #ifdef CONFIG_DEBUG_PAGEALLOC
1120 			if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
1121 				kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
1122 			else
1123 				check_poison_obj(cachep, objp);
1124 #else
1125 			check_poison_obj(cachep, objp);
1126 #endif
1127 		}
1128 		if (cachep->flags & SLAB_RED_ZONE) {
1129 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1130 				slab_error(cachep, "start of a freed object "
1131 							"was overwritten");
1132 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1133 				slab_error(cachep, "end of a freed object "
1134 							"was overwritten");
1135 		}
1136 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
1137 			(cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
1138 	}
1139 #else
1140 	if (cachep->dtor) {
1141 		int i;
1142 		for (i = 0; i < cachep->num; i++) {
1143 			void* objp = slabp->s_mem+cachep->objsize*i;
1144 			(cachep->dtor)(objp, cachep, 0);
1145 		}
1146 	}
1147 #endif
1148 
1149 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1150 		struct slab_rcu *slab_rcu;
1151 
1152 		slab_rcu = (struct slab_rcu *) slabp;
1153 		slab_rcu->cachep = cachep;
1154 		slab_rcu->addr = addr;
1155 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1156 	} else {
1157 		kmem_freepages(cachep, addr);
1158 		if (OFF_SLAB(cachep))
1159 			kmem_cache_free(cachep->slabp_cache, slabp);
1160 	}
1161 }
1162 
1163 /**
1164  * kmem_cache_create - Create a cache.
1165  * @name: A string which is used in /proc/slabinfo to identify this cache.
1166  * @size: The size of objects to be created in this cache.
1167  * @align: The required alignment for the objects.
1168  * @flags: SLAB flags
1169  * @ctor: A constructor for the objects.
1170  * @dtor: A destructor for the objects.
1171  *
1172  * Returns a ptr to the cache on success, NULL on failure.
1173  * Cannot be called within an interrupt, but can be interrupted.
1174  * The @ctor is run when new pages are allocated by the cache
1175  * and the @dtor is run before the pages are handed back.
1176  *
1177  * @name must be valid until the cache is destroyed. This implies that
1178  * the module calling this has to destroy the cache before getting
1179  * unloaded.
1180  *
1181  * The flags are
1182  *
1183  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1184  * to catch references to uninitialised memory.
1185  *
1186  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1187  * for buffer overruns.
1188  *
1189  * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
1190  * memory pressure.
1191  *
1192  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1193  * cacheline.  This can be beneficial if you're counting cycles as closely
1194  * as davem.
1195  */
1196 kmem_cache_t *
1197 kmem_cache_create (const char *name, size_t size, size_t align,
1198 	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
1199 	void (*dtor)(void*, kmem_cache_t *, unsigned long))
1200 {
1201 	size_t left_over, slab_size, ralign;
1202 	kmem_cache_t *cachep = NULL;
1203 
1204 	/*
1205 	 * Sanity checks... these are all serious usage bugs.
1206 	 */
1207 	if ((!name) ||
1208 		in_interrupt() ||
1209 		(size < BYTES_PER_WORD) ||
1210 		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
1211 		(dtor && !ctor)) {
1212 			printk(KERN_ERR "%s: Early error in slab %s\n",
1213 					__FUNCTION__, name);
1214 			BUG();
1215 		}
1216 
1217 #if DEBUG
1218 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
1219 	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
1220 		/* No constructor, but initial state check requested */
1221 		printk(KERN_ERR "%s: No con, but init state check "
1222 				"requested - %s\n", __FUNCTION__, name);
1223 		flags &= ~SLAB_DEBUG_INITIAL;
1224 	}
1225 
1226 #if FORCED_DEBUG
1227 	/*
1228 	 * Enable redzoning and last user accounting, except for caches with
1229 	 * large objects, if the increased size would increase the object size
1230 	 * above the next power of two: caches with object sizes just above a
1231 	 * power of two have a significant amount of internal fragmentation.
1232 	 */
1233 	if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
1234 		flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
1235 	if (!(flags & SLAB_DESTROY_BY_RCU))
1236 		flags |= SLAB_POISON;
1237 #endif
1238 	if (flags & SLAB_DESTROY_BY_RCU)
1239 		BUG_ON(flags & SLAB_POISON);
1240 #endif
1241 	if (flags & SLAB_DESTROY_BY_RCU)
1242 		BUG_ON(dtor);
1243 
1244 	/*
1245 	 * Always check flags; a caller might be expecting debug
1246 	 * support which isn't available.
1247 	 */
1248 	if (flags & ~CREATE_MASK)
1249 		BUG();
1250 
1251 	/* Check that size is in terms of words.  This is needed to avoid
1252 	 * unaligned accesses for some archs when redzoning is used, and makes
1253 	 * sure any on-slab bufctl's are also correctly aligned.
1254 	 */
1255 	if (size & (BYTES_PER_WORD-1)) {
1256 		size += (BYTES_PER_WORD-1);
1257 		size &= ~(BYTES_PER_WORD-1);
1258 	}
1259 
1260 	/* calculate the final buffer alignment: */
1261 	/* 1) arch recommendation: can be overridden for debug */
1262 	if (flags & SLAB_HWCACHE_ALIGN) {
1263 		/* Default alignment: as specified by the arch code.
1264 		 * Except if an object is really small, then squeeze multiple
1265 		 * objects into one cacheline.
1266 		 */
1267 		ralign = cache_line_size();
1268 		while (size <= ralign/2)
1269 			ralign /= 2;
1270 	} else {
1271 		ralign = BYTES_PER_WORD;
1272 	}
1273 	/* 2) arch mandated alignment: disables debug if necessary */
1274 	if (ralign < ARCH_SLAB_MINALIGN) {
1275 		ralign = ARCH_SLAB_MINALIGN;
1276 		if (ralign > BYTES_PER_WORD)
1277 			flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
1278 	}
1279 	/* 3) caller mandated alignment: disables debug if necessary */
1280 	if (ralign < align) {
1281 		ralign = align;
1282 		if (ralign > BYTES_PER_WORD)
1283 			flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
1284 	}
1285 	/* 4) Store it. Note that the debug code below can reduce
1286 	 *    the alignment to BYTES_PER_WORD.
1287 	 */
1288 	align = ralign;
1289 
1290 	/* Get cache's description obj. */
1291 	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
1292 	if (!cachep)
1293 		goto opps;
1294 	memset(cachep, 0, sizeof(kmem_cache_t));
1295 
1296 #if DEBUG
1297 	cachep->reallen = size;
1298 
1299 	if (flags & SLAB_RED_ZONE) {
1300 		/* redzoning only works with word aligned caches */
1301 		align = BYTES_PER_WORD;
1302 
1303 		/* add space for red zone words */
1304 		cachep->dbghead += BYTES_PER_WORD;
1305 		size += 2*BYTES_PER_WORD;
1306 	}
1307 	if (flags & SLAB_STORE_USER) {
1308 		/* user store requires word alignment and
1309 		 * one word storage behind the end of the real
1310 		 * object.
1311 		 */
1312 		align = BYTES_PER_WORD;
1313 		size += BYTES_PER_WORD;
1314 	}
1315 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
1316 	if (size > 128 && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
1317 		cachep->dbghead += PAGE_SIZE - size;
1318 		size = PAGE_SIZE;
1319 	}
1320 #endif
1321 #endif
1322 
1323 	/* Determine if the slab management is 'on' or 'off' slab. */
1324 	if (size >= (PAGE_SIZE>>3))
1325 		/*
1326 		 * Size is large, assume best to place the slab management obj
1327 		 * off-slab (should allow better packing of objs).
1328 		 */
1329 		flags |= CFLGS_OFF_SLAB;
1330 
1331 	size = ALIGN(size, align);
1332 
1333 	if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
1334 		/*
1335 		 * A VFS-reclaimable slab tends to have most allocations
1336 		 * as GFP_NOFS and we really don't want to have to be allocating
1337 		 * higher-order pages when we are unable to shrink dcache.
1338 		 */
1339 		cachep->gfporder = 0;
1340 		cache_estimate(cachep->gfporder, size, align, flags,
1341 					&left_over, &cachep->num);
1342 	} else {
1343 		/*
1344 		 * Calculate size (in pages) of slabs, and the num of objs per
1345 		 * slab.  This could be made much more intelligent.  For now,
1346 		 * try to avoid using high page-orders for slabs.  When the
1347 		 * gfp() funcs are more friendly towards high-order requests,
1348 		 * this should be changed.
1349 		 */
1350 		do {
1351 			unsigned int break_flag = 0;
1352 cal_wastage:
1353 			cache_estimate(cachep->gfporder, size, align, flags,
1354 						&left_over, &cachep->num);
1355 			if (break_flag)
1356 				break;
1357 			if (cachep->gfporder >= MAX_GFP_ORDER)
1358 				break;
1359 			if (!cachep->num)
1360 				goto next;
1361 			if (flags & CFLGS_OFF_SLAB &&
1362 					cachep->num > offslab_limit) {
1363 				/* This num of objs will cause problems. */
1364 				cachep->gfporder--;
1365 				break_flag++;
1366 				goto cal_wastage;
1367 			}
1368 
1369 			/*
1370 			 * Large num of objs is good, but v. large slabs are
1371 			 * currently bad for the gfp()s.
1372 			 */
1373 			if (cachep->gfporder >= slab_break_gfp_order)
1374 				break;
1375 
1376 			if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
1377 				break;	/* Acceptable internal fragmentation. */
1378 next:
1379 			cachep->gfporder++;
1380 		} while (1);
1381 	}
1382 
1383 	if (!cachep->num) {
1384 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
1385 		kmem_cache_free(&cache_cache, cachep);
1386 		cachep = NULL;
1387 		goto opps;
1388 	}
1389 	slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
1390 				+ sizeof(struct slab), align);
1391 
1392 	/*
1393 	 * If the slab has been placed off-slab, and we have enough space then
1394 	 * move it on-slab. This is at the expense of any extra colouring.
1395 	 */
1396 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
1397 		flags &= ~CFLGS_OFF_SLAB;
1398 		left_over -= slab_size;
1399 	}
1400 
1401 	if (flags & CFLGS_OFF_SLAB) {
1402 		/* really off slab. No need for manual alignment */
1403 		slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
1404 	}
1405 
1406 	cachep->colour_off = cache_line_size();
1407 	/* Offset must be a multiple of the alignment. */
1408 	if (cachep->colour_off < align)
1409 		cachep->colour_off = align;
1410 	cachep->colour = left_over/cachep->colour_off;
1411 	cachep->slab_size = slab_size;
1412 	cachep->flags = flags;
1413 	cachep->gfpflags = 0;
1414 	if (flags & SLAB_CACHE_DMA)
1415 		cachep->gfpflags |= GFP_DMA;
1416 	spin_lock_init(&cachep->spinlock);
1417 	cachep->objsize = size;
1418 	/* NUMA */
1419 	INIT_LIST_HEAD(&cachep->lists.slabs_full);
1420 	INIT_LIST_HEAD(&cachep->lists.slabs_partial);
1421 	INIT_LIST_HEAD(&cachep->lists.slabs_free);
1422 
1423 	if (flags & CFLGS_OFF_SLAB)
1424 		cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
1425 	cachep->ctor = ctor;
1426 	cachep->dtor = dtor;
1427 	cachep->name = name;
1428 
1429 	/* Don't let CPUs come and go */
1430 	lock_cpu_hotplug();
1431 
1432 	if (g_cpucache_up == FULL) {
1433 		enable_cpucache(cachep);
1434 	} else {
1435 		if (g_cpucache_up == NONE) {
1436 			/* Note: the first kmem_cache_create must create
1437 			 * the cache that's used by kmalloc(24), otherwise
1438 			 * the creation of further caches will BUG().
1439 			 */
1440 			cachep->array[smp_processor_id()] = &initarray_generic.cache;
1441 			g_cpucache_up = PARTIAL;
1442 		} else {
1443 			cachep->array[smp_processor_id()] = kmalloc(sizeof(struct arraycache_init),GFP_KERNEL);
1444 		}
1445 		BUG_ON(!ac_data(cachep));
1446 		ac_data(cachep)->avail = 0;
1447 		ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1448 		ac_data(cachep)->batchcount = 1;
1449 		ac_data(cachep)->touched = 0;
1450 		cachep->batchcount = 1;
1451 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
1452 		cachep->free_limit = (1+num_online_cpus())*cachep->batchcount
1453 					+ cachep->num;
1454 	}
1455 
1456 	cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
1457 					((unsigned long)cachep)%REAPTIMEOUT_LIST3;
1458 
1459 	/* Need the semaphore to access the chain. */
1460 	down(&cache_chain_sem);
1461 	{
1462 		struct list_head *p;
1463 		mm_segment_t old_fs;
1464 
1465 		old_fs = get_fs();
1466 		set_fs(KERNEL_DS);
1467 		list_for_each(p, &cache_chain) {
1468 			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
1469 			char tmp;
1470 			/* This happens when the module gets unloaded and doesn't
1471 			   destroy its slab cache and no one else reuses the vmalloc
1472 			   area of the module. Print a warning. */
1473 			if (__get_user(tmp,pc->name)) {
1474 				printk("SLAB: cache with size %d has lost its name\n",
1475 					pc->objsize);
1476 				continue;
1477 			}
1478 			if (!strcmp(pc->name,name)) {
1479 				printk("kmem_cache_create: duplicate cache %s\n",name);
1480 				up(&cache_chain_sem);
1481 				unlock_cpu_hotplug();
1482 				BUG();
1483 			}
1484 		}
1485 		set_fs(old_fs);
1486 	}
1487 
1488 	/* cache setup completed, link it into the list */
1489 	list_add(&cachep->next, &cache_chain);
1490 	up(&cache_chain_sem);
1491 	unlock_cpu_hotplug();
1492 opps:
1493 	if (!cachep && (flags & SLAB_PANIC))
1494 		panic("kmem_cache_create(): failed to create slab `%s'\n",
1495 			name);
1496 	return cachep;
1497 }
1498 EXPORT_SYMBOL(kmem_cache_create);
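
/*
 * Typical usage from a module (sketch only; the 'foo' identifiers are
 * hypothetical):
 *
 *	static kmem_cache_t *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *		if (!foo_cachep)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */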
1499 
1500 #if DEBUG
1501 static void check_irq_off(void)
1502 {
1503 	BUG_ON(!irqs_disabled());
1504 }
1505 
1506 static void check_irq_on(void)
1507 {
1508 	BUG_ON(irqs_disabled());
1509 }
1510 
1511 static void check_spinlock_acquired(kmem_cache_t *cachep)
1512 {
1513 #ifdef CONFIG_SMP
1514 	check_irq_off();
1515 	BUG_ON(spin_trylock(&cachep->spinlock));
1516 #endif
1517 }
1518 #else
1519 #define check_irq_off()	do { } while(0)
1520 #define check_irq_on()	do { } while(0)
1521 #define check_spinlock_acquired(x) do { } while(0)
1522 #endif
1523 
1524 /*
1525  * Waits for all CPUs to execute func().
1526  */
1527 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
1528 {
1529 	check_irq_on();
1530 	preempt_disable();
1531 
1532 	local_irq_disable();
1533 	func(arg);
1534 	local_irq_enable();
1535 
1536 	if (smp_call_function(func, arg, 1, 1))
1537 		BUG();
1538 
1539 	preempt_enable();
1540 }
1541 
1542 static void drain_array_locked(kmem_cache_t* cachep,
1543 				struct array_cache *ac, int force);
1544 
1545 static void do_drain(void *arg)
1546 {
1547 	kmem_cache_t *cachep = (kmem_cache_t*)arg;
1548 	struct array_cache *ac;
1549 
1550 	check_irq_off();
1551 	ac = ac_data(cachep);
1552 	spin_lock(&cachep->spinlock);
1553 	free_block(cachep, &ac_entry(ac)[0], ac->avail);
1554 	spin_unlock(&cachep->spinlock);
1555 	ac->avail = 0;
1556 }
1557 
1558 static void drain_cpu_caches(kmem_cache_t *cachep)
1559 {
1560 	smp_call_function_all_cpus(do_drain, cachep);
1561 	check_irq_on();
1562 	spin_lock_irq(&cachep->spinlock);
1563 	if (cachep->lists.shared)
1564 		drain_array_locked(cachep, cachep->lists.shared, 1);
1565 	spin_unlock_irq(&cachep->spinlock);
1566 }
1567 
1568 
1569 /* NUMA shrink all list3s */
1570 static int __cache_shrink(kmem_cache_t *cachep)
1571 {
1572 	struct slab *slabp;
1573 	int ret;
1574 
1575 	drain_cpu_caches(cachep);
1576 
1577 	check_irq_on();
1578 	spin_lock_irq(&cachep->spinlock);
1579 
1580 	for(;;) {
1581 		struct list_head *p;
1582 
1583 		p = cachep->lists.slabs_free.prev;
1584 		if (p == &cachep->lists.slabs_free)
1585 			break;
1586 
1587 		slabp = list_entry(cachep->lists.slabs_free.prev, struct slab, list);
1588 #if DEBUG
1589 		if (slabp->inuse)
1590 			BUG();
1591 #endif
1592 		list_del(&slabp->list);
1593 
1594 		cachep->lists.free_objects -= cachep->num;
1595 		spin_unlock_irq(&cachep->spinlock);
1596 		slab_destroy(cachep, slabp);
1597 		spin_lock_irq(&cachep->spinlock);
1598 	}
1599 	ret = !list_empty(&cachep->lists.slabs_full) ||
1600 		!list_empty(&cachep->lists.slabs_partial);
1601 	spin_unlock_irq(&cachep->spinlock);
1602 	return ret;
1603 }
1604 
1605 /**
1606  * kmem_cache_shrink - Shrink a cache.
1607  * @cachep: The cache to shrink.
1608  *
1609  * Releases as many slabs as possible for a cache.
1610  * To help debugging, a zero exit status indicates all slabs were released.
1611  */
1612 int kmem_cache_shrink(kmem_cache_t *cachep)
1613 {
1614 	if (!cachep || in_interrupt())
1615 		BUG();
1616 
1617 	return __cache_shrink(cachep);
1618 }
1619 EXPORT_SYMBOL(kmem_cache_shrink);
1620 
1621 /**
1622  * kmem_cache_destroy - delete a cache
1623  * @cachep: the cache to destroy
1624  *
1625  * Remove a kmem_cache_t object from the slab cache.
1626  * Returns 0 on success.
1627  *
1628  * It is expected this function will be called by a module when it is
1629  * unloaded.  This will remove the cache completely, and avoid a duplicate
1630  * cache being allocated each time a module is loaded and unloaded, if the
1631  * module doesn't have persistent in-kernel storage across loads and unloads.
1632  *
1633  * The cache must be empty before calling this function.
1634  *
1635  * The caller must guarantee that no one will allocate memory from the cache
1636  * during the kmem_cache_destroy().
1637  */
1638 int kmem_cache_destroy(kmem_cache_t * cachep)
1639 {
1640 	int i;
1641 
1642 	if (!cachep || in_interrupt())
1643 		BUG();
1644 
1645 	/* Don't let CPUs come and go */
1646 	lock_cpu_hotplug();
1647 
1648 	/* Find the cache in the chain of caches. */
1649 	down(&cache_chain_sem);
1650 	/*
1651 	 * the chain is never empty, cache_cache is never destroyed
1652 	 */
1653 	list_del(&cachep->next);
1654 	up(&cache_chain_sem);
1655 
1656 	if (__cache_shrink(cachep)) {
1657 		slab_error(cachep, "Can't free all objects");
1658 		down(&cache_chain_sem);
1659 		list_add(&cachep->next,&cache_chain);
1660 		up(&cache_chain_sem);
1661 		unlock_cpu_hotplug();
1662 		return 1;
1663 	}
1664 
1665 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1666 		synchronize_kernel();
1667 
1668 	/* no cpu_online check required here since we clear the percpu
1669 	 * array on cpu offline and set this to NULL.
1670 	 */
1671 	for (i = 0; i < NR_CPUS; i++)
1672 		kfree(cachep->array[i]);
1673 
1674 	/* NUMA: free the list3 structures */
1675 	kfree(cachep->lists.shared);
1676 	cachep->lists.shared = NULL;
1677 	kmem_cache_free(&cache_cache, cachep);
1678 
1679 	unlock_cpu_hotplug();
1680 
1681 	return 0;
1682 }
1683 EXPORT_SYMBOL(kmem_cache_destroy);
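
/*
 * Typical module-exit usage (sketch; hypothetical names). All objects
 * must have been freed beforehand:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		if (kmem_cache_destroy(foo_cachep))
 *			printk(KERN_ERR "foo: cache not empty\n");
 *	}
 */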
1684 
1685 /* Get the memory for a slab management obj. */
1686 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep,
1687 			void *objp, int colour_off, unsigned int __nocast local_flags)
1688 {
1689 	struct slab *slabp;
1690 
1691 	if (OFF_SLAB(cachep)) {
1692 		/* Slab management obj is off-slab. */
1693 		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
1694 		if (!slabp)
1695 			return NULL;
1696 	} else {
1697 		slabp = objp+colour_off;
1698 		colour_off += cachep->slab_size;
1699 	}
1700 	slabp->inuse = 0;
1701 	slabp->colouroff = colour_off;
1702 	slabp->s_mem = objp+colour_off;
1703 
1704 	return slabp;
1705 }
1706 
1707 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
1708 {
1709 	return (kmem_bufctl_t *)(slabp+1);
1710 }
1711 
1712 static void cache_init_objs(kmem_cache_t *cachep,
1713 			struct slab *slabp, unsigned long ctor_flags)
1714 {
1715 	int i;
1716 
1717 	for (i = 0; i < cachep->num; i++) {
1718 		void* objp = slabp->s_mem+cachep->objsize*i;
1719 #if DEBUG
1720 		/* need to poison the objs? */
1721 		if (cachep->flags & SLAB_POISON)
1722 			poison_obj(cachep, objp, POISON_FREE);
1723 		if (cachep->flags & SLAB_STORE_USER)
1724 			*dbg_userword(cachep, objp) = NULL;
1725 
1726 		if (cachep->flags & SLAB_RED_ZONE) {
1727 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
1728 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
1729 		}
1730 		/*
1731 		 * Constructors are not allowed to allocate memory from
1732 		 * the same cache which they are a constructor for.
1733 		 * Otherwise, deadlock. They must also be threaded.
1734 		 */
1735 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
1736 			cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
1737 
1738 		if (cachep->flags & SLAB_RED_ZONE) {
1739 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1740 				slab_error(cachep, "constructor overwrote the"
1741 							" end of an object");
1742 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1743 				slab_error(cachep, "constructor overwrote the"
1744 							" start of an object");
1745 		}
1746 		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
1747 			kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
1748 #else
1749 		if (cachep->ctor)
1750 			cachep->ctor(objp, cachep, ctor_flags);
1751 #endif
1752 		slab_bufctl(slabp)[i] = i+1;
1753 	}
1754 	slab_bufctl(slabp)[i-1] = BUFCTL_END;
1755 	slabp->free = 0;
1756 }
1757 
1758 static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
1759 {
1760 	if (flags & SLAB_DMA) {
1761 		if (!(cachep->gfpflags & GFP_DMA))
1762 			BUG();
1763 	} else {
1764 		if (cachep->gfpflags & GFP_DMA)
1765 			BUG();
1766 	}
1767 }
1768 
1769 static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
1770 {
1771 	int i;
1772 	struct page *page;
1773 
1774 	/* Nasty!!!!!! I hope this is OK. */
1775 	i = 1 << cachep->gfporder;
1776 	page = virt_to_page(objp);
1777 	do {
1778 		SET_PAGE_CACHE(page, cachep);
1779 		SET_PAGE_SLAB(page, slabp);
1780 		page++;
1781 	} while (--i);
1782 }
1783 
1784 /*
1785  * Grow (by 1) the number of slabs within a cache.  This is called by
1786  * kmem_cache_alloc() when there are no active objs left in a cache.
1787  */
1788 static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
1789 {
1790 	struct slab	*slabp;
1791 	void		*objp;
1792 	size_t		 offset;
1793 	unsigned int	 local_flags;
1794 	unsigned long	 ctor_flags;
1795 
1796 	/* Be lazy and only check for valid flags here,
1797 	 * keeping it out of the critical path in kmem_cache_alloc().
1798 	 */
1799 	if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
1800 		BUG();
1801 	if (flags & SLAB_NO_GROW)
1802 		return 0;
1803 
1804 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
1805 	local_flags = (flags & SLAB_LEVEL_MASK);
1806 	if (!(local_flags & __GFP_WAIT))
1807 		/*
1808 		 * Not allowed to sleep.  Need to tell a constructor about
1809 		 * this - it might need to know...
1810 		 */
1811 		ctor_flags |= SLAB_CTOR_ATOMIC;
1812 
1813 	/* About to mess with non-constant members - lock. */
1814 	check_irq_off();
1815 	spin_lock(&cachep->spinlock);
1816 
1817 	/* Get the colour for the slab, and calculate the next value. */
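	/*
	 * Example with illustrative numbers: if colour_off is 32 and
	 * colour is 4, successive slabs start their objects at offsets
	 * 0, 32, 64, 96, 0, ... so that identical objects in different
	 * slabs land on different cache lines.
	 */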
1818 	offset = cachep->colour_next;
1819 	cachep->colour_next++;
1820 	if (cachep->colour_next >= cachep->colour)
1821 		cachep->colour_next = 0;
1822 	offset *= cachep->colour_off;
1823 
1824 	spin_unlock(&cachep->spinlock);
1825 
1826 	if (local_flags & __GFP_WAIT)
1827 		local_irq_enable();
1828 
1829 	/*
1830 	 * The test for missing atomic flag is performed here, rather than
1831 	 * the more obvious place, simply to reduce the critical path length
1832 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
1833 	 * will eventually be caught here (where it matters).
1834 	 */
1835 	kmem_flagcheck(cachep, flags);
1836 
1837 
1838 	/* Get mem for the objs. */
1839 	if (!(objp = kmem_getpages(cachep, flags, nodeid)))
1840 		goto failed;
1841 
1842 	/* Get slab management. */
1843 	if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
1844 		goto opps1;
1845 
1846 	set_slab_attr(cachep, slabp, objp);
1847 
1848 	cache_init_objs(cachep, slabp, ctor_flags);
1849 
1850 	if (local_flags & __GFP_WAIT)
1851 		local_irq_disable();
1852 	check_irq_off();
1853 	spin_lock(&cachep->spinlock);
1854 
1855 	/* Make slab active. */
1856 	list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_free));
1857 	STATS_INC_GROWN(cachep);
1858 	list3_data(cachep)->free_objects += cachep->num;
1859 	spin_unlock(&cachep->spinlock);
1860 	return 1;
1861 opps1:
1862 	kmem_freepages(cachep, objp);
1863 failed:
1864 	if (local_flags & __GFP_WAIT)
1865 		local_irq_disable();
1866 	return 0;
1867 }
1868 
1869 #if DEBUG
1870 
1871 /*
1872  * Perform extra freeing checks:
1873  * - detect bad pointers.
1874  * - POISON/RED_ZONE checking
1875  * - destructor calls, for caches with POISON+dtor
1876  */
1877 static void kfree_debugcheck(const void *objp)
1878 {
1879 	struct page *page;
1880 
1881 	if (!virt_addr_valid(objp)) {
1882 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
1883 			(unsigned long)objp);
1884 		BUG();
1885 	}
1886 	page = virt_to_page(objp);
1887 	if (!PageSlab(page)) {
1888 		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
1889 		BUG();
1890 	}
1891 }
1892 
1893 static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
1894 					void *caller)
1895 {
1896 	struct page *page;
1897 	unsigned int objnr;
1898 	struct slab *slabp;
1899 
1900 	objp -= obj_dbghead(cachep);
1901 	kfree_debugcheck(objp);
1902 	page = virt_to_page(objp);
1903 
1904 	if (GET_PAGE_CACHE(page) != cachep) {
1905 		printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
1906 				GET_PAGE_CACHE(page),cachep);
1907 		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
1908 		printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name);
1909 		WARN_ON(1);
1910 	}
1911 	slabp = GET_PAGE_SLAB(page);
1912 
1913 	if (cachep->flags & SLAB_RED_ZONE) {
1914 		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
1915 			slab_error(cachep, "double free, or memory outside"
1916 						" object was overwritten");
1917 			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
1918 					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
1919 		}
1920 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
1921 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
1922 	}
1923 	if (cachep->flags & SLAB_STORE_USER)
1924 		*dbg_userword(cachep, objp) = caller;
1925 
1926 	objnr = (objp-slabp->s_mem)/cachep->objsize;
1927 
1928 	BUG_ON(objnr >= cachep->num);
1929 	BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
1930 
1931 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
1932 		/* Need to call the slab's constructor so the
1933 		 * caller can verify its state (debugging).
1934 		 * Called without the cache-lock held.
1935 		 */
1936 		cachep->ctor(objp+obj_dbghead(cachep),
1937 					cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
1938 	}
1939 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
1940 		/* We want to cache-poison the object,
1941 		 * so call the destructor callback first.
1942 		 */
1943 		cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
1944 	}
1945 	if (cachep->flags & SLAB_POISON) {
1946 #ifdef CONFIG_DEBUG_PAGEALLOC
1947 		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
1948 			store_stackinfo(cachep, objp, (unsigned long)caller);
1949 			kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
1950 		} else {
1951 			poison_obj(cachep, objp, POISON_FREE);
1952 		}
1953 #else
1954 		poison_obj(cachep, objp, POISON_FREE);
1955 #endif
1956 	}
1957 	return objp;
1958 }
1959 
1960 static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
1961 {
1962 	kmem_bufctl_t i;
1963 	int entries = 0;
1964 
1965 	check_spinlock_acquired(cachep);
1966 	/* Check slab's freelist to see if this obj is there. */
1967 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
1968 		entries++;
1969 		if (entries > cachep->num || i >= cachep->num)
1970 			goto bad;
1971 	}
1972 	if (entries != cachep->num - slabp->inuse) {
1973 bad:
1974 		printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
1975 				cachep->name, cachep->num, slabp, slabp->inuse);
1976 		for (i = 0; i < sizeof(*slabp) + cachep->num*sizeof(kmem_bufctl_t); i++) {
1977 			if ((i % 16) == 0)
1978 				printk("\n%03x:", i);
1979 			printk(" %02x", ((unsigned char*)slabp)[i]);
1980 		}
1981 		printk("\n");
1982 		BUG();
1983 	}
1984 }
1985 #else
1986 #define kfree_debugcheck(x) do { } while(0)
1987 #define cache_free_debugcheck(x,objp,z) (objp)
1988 #define check_slabp(x,y) do { } while(0)
1989 #endif
1990 
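/*
 * Refill the per-cpu array from the global pools, in order of
 * preference: take a batch from the shared array if one exists, then
 * pull objects out of partial slabs, then free slabs, and finally grow
 * the cache by a fresh slab if nothing else is left.
 */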
1991 static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
1992 {
1993 	int batchcount;
1994 	struct kmem_list3 *l3;
1995 	struct array_cache *ac;
1996 
1997 	check_irq_off();
1998 	ac = ac_data(cachep);
1999 retry:
2000 	batchcount = ac->batchcount;
2001 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2002 		/* if there was little recent activity on this
2003 		 * cache, then perform only a partial refill.
2004 		 * Otherwise we could generate refill bouncing.
2005 		 */
2006 		batchcount = BATCHREFILL_LIMIT;
2007 	}
2008 	l3 = list3_data(cachep);
2009 
2010 	BUG_ON(ac->avail > 0);
2011 	spin_lock(&cachep->spinlock);
2012 	if (l3->shared) {
2013 		struct array_cache *shared_array = l3->shared;
2014 		if (shared_array->avail) {
2015 			if (batchcount > shared_array->avail)
2016 				batchcount = shared_array->avail;
2017 			shared_array->avail -= batchcount;
2018 			ac->avail = batchcount;
2019 			memcpy(ac_entry(ac), &ac_entry(shared_array)[shared_array->avail],
2020 					sizeof(void*)*batchcount);
2021 			shared_array->touched = 1;
2022 			goto alloc_done;
2023 		}
2024 	}
2025 	while (batchcount > 0) {
2026 		struct list_head *entry;
2027 		struct slab *slabp;
2028 		/* Get the slab that the allocation is to come from. */
2029 		entry = l3->slabs_partial.next;
2030 		if (entry == &l3->slabs_partial) {
2031 			l3->free_touched = 1;
2032 			entry = l3->slabs_free.next;
2033 			if (entry == &l3->slabs_free)
2034 				goto must_grow;
2035 		}
2036 
2037 		slabp = list_entry(entry, struct slab, list);
2038 		check_slabp(cachep, slabp);
2039 		check_spinlock_acquired(cachep);
2040 		while (slabp->inuse < cachep->num && batchcount--) {
2041 			kmem_bufctl_t next;
2042 			STATS_INC_ALLOCED(cachep);
2043 			STATS_INC_ACTIVE(cachep);
2044 			STATS_SET_HIGH(cachep);
2045 
2046 			/* get obj pointer */
2047 			ac_entry(ac)[ac->avail++] = slabp->s_mem + slabp->free*cachep->objsize;
2048 
2049 			slabp->inuse++;
2050 			next = slab_bufctl(slabp)[slabp->free];
2051 #if DEBUG
2052 			slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2053 #endif
2054 			slabp->free = next;
2055 		}
2056 		check_slabp(cachep, slabp);
2057 
2058 		/* move slabp to correct slabp list: */
2059 		list_del(&slabp->list);
2060 		if (slabp->free == BUFCTL_END)
2061 			list_add(&slabp->list, &l3->slabs_full);
2062 		else
2063 			list_add(&slabp->list, &l3->slabs_partial);
2064 	}
2065 
2066 must_grow:
2067 	l3->free_objects -= ac->avail;
2068 alloc_done:
2069 	spin_unlock(&cachep->spinlock);
2070 
2071 	if (unlikely(!ac->avail)) {
2072 		int x;
2073 		x = cache_grow(cachep, flags, -1);
2074 
2075 		/* cache_grow can reenable interrupts, then ac could change. */
2076 		ac = ac_data(cachep);
2077 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
2078 			return NULL;
2079 
2080 		if (!ac->avail)		/* objects refilled by interrupt? */
2081 			goto retry;
2082 	}
2083 	ac->touched = 1;
2084 	return ac_entry(ac)[--ac->avail];
2085 }
2086 
2087 static inline void
2088 cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
2089 {
2090 	might_sleep_if(flags & __GFP_WAIT);
2091 #if DEBUG
2092 	kmem_flagcheck(cachep, flags);
2093 #endif
2094 }
2095 
2096 #if DEBUG
2097 static void *
2098 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
2099 			unsigned long flags, void *objp, void *caller)
2100 {
2101 	if (!objp)
2102 		return objp;
2103 	if (cachep->flags & SLAB_POISON) {
2104 #ifdef CONFIG_DEBUG_PAGEALLOC
2105 		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2106 			kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
2107 		else
2108 			check_poison_obj(cachep, objp);
2109 #else
2110 		check_poison_obj(cachep, objp);
2111 #endif
2112 		poison_obj(cachep, objp, POISON_INUSE);
2113 	}
2114 	if (cachep->flags & SLAB_STORE_USER)
2115 		*dbg_userword(cachep, objp) = caller;
2116 
2117 	if (cachep->flags & SLAB_RED_ZONE) {
2118 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2119 			slab_error(cachep, "double free, or memory outside"
2120 						" object was overwritten");
2121 			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
2122 					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
2123 		}
2124 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
2125 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
2126 	}
2127 	objp += obj_dbghead(cachep);
2128 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
2129 		unsigned long	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2130 
2131 		if (!(flags & __GFP_WAIT))
2132 			ctor_flags |= SLAB_CTOR_ATOMIC;
2133 
2134 		cachep->ctor(objp, cachep, ctor_flags);
2135 	}
2136 	return objp;
2137 }
2138 #else
2139 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2140 #endif
2141 
2142 
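/*
 * Allocation fast path: with local interrupts disabled, pop the top
 * entry off the per-cpu array (LIFO, so it is likely cache-warm).
 * Only when the array is empty do we enter the slow path through
 * cache_alloc_refill() and take the cache spinlock.
 */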
2143 static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
2144 {
2145 	unsigned long save_flags;
2146 	void* objp;
2147 	struct array_cache *ac;
2148 
2149 	cache_alloc_debugcheck_before(cachep, flags);
2150 
2151 	local_irq_save(save_flags);
2152 	ac = ac_data(cachep);
2153 	if (likely(ac->avail)) {
2154 		STATS_INC_ALLOCHIT(cachep);
2155 		ac->touched = 1;
2156 		objp = ac_entry(ac)[--ac->avail];
2157 	} else {
2158 		STATS_INC_ALLOCMISS(cachep);
2159 		objp = cache_alloc_refill(cachep, flags);
2160 	}
2161 	local_irq_restore(save_flags);
2162 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, __builtin_return_address(0));
2163 	return objp;
2164 }
2165 
2166 /*
2167  * NUMA: different approach needed if the spinlock is moved into
2168  * the l3 structure
2169  */
2170 
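/*
 * Return a batch of objects to their home slabs: each object is pushed
 * back onto its slab's freelist; a slab that becomes empty is destroyed
 * if the cache already holds more than free_limit spare objects,
 * otherwise it is re-sorted onto the free or partial list.
 */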
2171 static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
2172 {
2173 	int i;
2174 
2175 	check_spinlock_acquired(cachep);
2176 
2177 	/* NUMA: move add into loop */
2178 	cachep->lists.free_objects += nr_objects;
2179 
2180 	for (i = 0; i < nr_objects; i++) {
2181 		void *objp = objpp[i];
2182 		struct slab *slabp;
2183 		unsigned int objnr;
2184 
2185 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
2186 		list_del(&slabp->list);
2187 		objnr = (objp - slabp->s_mem) / cachep->objsize;
2188 		check_slabp(cachep, slabp);
2189 #if DEBUG
2190 		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
2191 			printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
2192 						cachep->name, objp);
2193 			BUG();
2194 		}
2195 #endif
2196 		slab_bufctl(slabp)[objnr] = slabp->free;
2197 		slabp->free = objnr;
2198 		STATS_DEC_ACTIVE(cachep);
2199 		slabp->inuse--;
2200 		check_slabp(cachep, slabp);
2201 
2202 		/* fixup slab chains */
2203 		if (slabp->inuse == 0) {
2204 			if (cachep->lists.free_objects > cachep->free_limit) {
2205 				cachep->lists.free_objects -= cachep->num;
2206 				slab_destroy(cachep, slabp);
2207 			} else {
2208 				list_add(&slabp->list,
2209 				&list3_data_ptr(cachep, objp)->slabs_free);
2210 			}
2211 		} else {
2212 			/* Unconditionally move a slab to the end of the
2213 		 * partial list on free - this gives the other
2214 		 * objects in the slab maximum time to be freed, too.
2215 			 */
2216 			list_add_tail(&slabp->list,
2217 				&list3_data_ptr(cachep, objp)->slabs_partial);
2218 		}
2219 	}
2220 }
2221 
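/*
 * Drain the oldest batchcount entries of an overflowing per-cpu array:
 * they are moved into the shared array if there is room, otherwise
 * given back to the slabs via free_block(); the remaining entries are
 * then shifted down so the array keeps its LIFO order.
 */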
2222 static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
2223 {
2224 	int batchcount;
2225 
2226 	batchcount = ac->batchcount;
2227 #if DEBUG
2228 	BUG_ON(!batchcount || batchcount > ac->avail);
2229 #endif
2230 	check_irq_off();
2231 	spin_lock(&cachep->spinlock);
2232 	if (cachep->lists.shared) {
2233 		struct array_cache *shared_array = cachep->lists.shared;
2234 		int max = shared_array->limit-shared_array->avail;
2235 		if (max) {
2236 			if (batchcount > max)
2237 				batchcount = max;
2238 			memcpy(&ac_entry(shared_array)[shared_array->avail],
2239 					&ac_entry(ac)[0],
2240 					sizeof(void*)*batchcount);
2241 			shared_array->avail += batchcount;
2242 			goto free_done;
2243 		}
2244 	}
2245 
2246 	free_block(cachep, &ac_entry(ac)[0], batchcount);
2247 free_done:
2248 #if STATS
2249 	{
2250 		int i = 0;
2251 		struct list_head *p;
2252 
2253 		p = list3_data(cachep)->slabs_free.next;
2254 		while (p != &(list3_data(cachep)->slabs_free)) {
2255 			struct slab *slabp;
2256 
2257 			slabp = list_entry(p, struct slab, list);
2258 			BUG_ON(slabp->inuse);
2259 
2260 			i++;
2261 			p = p->next;
2262 		}
2263 		STATS_SET_FREEABLE(cachep, i);
2264 	}
2265 #endif
2266 	spin_unlock(&cachep->spinlock);
2267 	ac->avail -= batchcount;
2268 	memmove(&ac_entry(ac)[0], &ac_entry(ac)[batchcount],
2269 			sizeof(void*)*ac->avail);
2270 }
2271 
2272 /*
2273  * __cache_free
2274  * Release an obj back to its cache. If the obj has a constructed
2275  * state, it must be in this state _before_ it is released.
2276  *
2277  * Called with disabled ints.
2278  */
2279 static inline void __cache_free(kmem_cache_t *cachep, void *objp)
2280 {
2281 	struct array_cache *ac = ac_data(cachep);
2282 
2283 	check_irq_off();
2284 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
2285 
2286 	if (likely(ac->avail < ac->limit)) {
2287 		STATS_INC_FREEHIT(cachep);
2288 		ac_entry(ac)[ac->avail++] = objp;
2289 		return;
2290 	} else {
2291 		STATS_INC_FREEMISS(cachep);
2292 		cache_flusharray(cachep, ac);
2293 		ac_entry(ac)[ac->avail++] = objp;
2294 	}
2295 }
2296 
2297 /**
2298  * kmem_cache_alloc - Allocate an object
2299  * @cachep: The cache to allocate from.
2300  * @flags: See kmalloc().
2301  *
2302  * Allocate an object from this cache.  The flags are only relevant
2303  * if the cache has no available objects.
2304  */
2305 void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
2306 {
2307 	return __cache_alloc(cachep, flags);
2308 }
2309 EXPORT_SYMBOL(kmem_cache_alloc);
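/*
 * Illustrative alloc/free pairing (a sketch; struct foo and foo_cachep
 * are hypothetical):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */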
2310 
2311 /**
2312  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry
2313  *
2314  * @cachep: the cache we're checking against
2315  * @ptr: pointer to validate
2316  *
2317  * This verifies that the untrusted pointer looks sane:
2318  * it is _not_ a guarantee that the pointer is actually
2319  * part of the slab cache in question, but it at least
2320  * validates that the pointer can be dereferenced and
2321  * looks half-way sane.
2322  *
2323  * Currently only used for dentry validation.
2324  */
2325 int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
2326 {
2327 	unsigned long addr = (unsigned long) ptr;
2328 	unsigned long min_addr = PAGE_OFFSET;
2329 	unsigned long align_mask = BYTES_PER_WORD-1;
2330 	unsigned long size = cachep->objsize;
2331 	struct page *page;
2332 
2333 	if (unlikely(addr < min_addr))
2334 		goto out;
2335 	if (unlikely(addr > (unsigned long)high_memory - size))
2336 		goto out;
2337 	if (unlikely(addr & align_mask))
2338 		goto out;
2339 	if (unlikely(!kern_addr_valid(addr)))
2340 		goto out;
2341 	if (unlikely(!kern_addr_valid(addr + size - 1)))
2342 		goto out;
2343 	page = virt_to_page(ptr);
2344 	if (unlikely(!PageSlab(page)))
2345 		goto out;
2346 	if (unlikely(GET_PAGE_CACHE(page) != cachep))
2347 		goto out;
2348 	return 1;
2349 out:
2350 	return 0;
2351 }
2352 
2353 #ifdef CONFIG_NUMA
2354 /**
2355  * kmem_cache_alloc_node - Allocate an object on the specified node
2356  * @cachep: The cache to allocate from.
2357  * @nodeid: node number of the target node.
2358  *
2359  * Identical to kmem_cache_alloc, except that this function is slow
2360  * and can sleep. It allocates memory on the given node, which can
2361  * improve performance for cpu-bound structures. Note that it always
2362  * allocates internally with GFP_KERNEL.
2363  */
2364 void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
2365 {
2366 	int loop;
2367 	void *objp;
2368 	struct slab *slabp;
2369 	kmem_bufctl_t next;
2370 
2371 	for (loop = 0;;loop++) {
2372 		struct list_head *q;
2373 
2374 		objp = NULL;
2375 		check_irq_on();
2376 		spin_lock_irq(&cachep->spinlock);
2377 		/* walk through all partial and empty slabs and find one
2378 		 * from the right node */
2379 		list_for_each(q,&cachep->lists.slabs_partial) {
2380 			slabp = list_entry(q, struct slab, list);
2381 
2382 			if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
2383 					loop > 2)
2384 				goto got_slabp;
2385 		}
2386 		list_for_each(q, &cachep->lists.slabs_free) {
2387 			slabp = list_entry(q, struct slab, list);
2388 
2389 			if (page_to_nid(virt_to_page(slabp->s_mem)) == nodeid ||
2390 					loop > 2)
2391 				goto got_slabp;
2392 		}
2393 		spin_unlock_irq(&cachep->spinlock);
2394 
2395 		local_irq_disable();
2396 		if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
2397 			local_irq_enable();
2398 			return NULL;
2399 		}
2400 		local_irq_enable();
2401 	}
2402 got_slabp:
2403 	/* found one: allocate object */
2404 	check_slabp(cachep, slabp);
2405 	check_spinlock_acquired(cachep);
2406 
2407 	STATS_INC_ALLOCED(cachep);
2408 	STATS_INC_ACTIVE(cachep);
2409 	STATS_SET_HIGH(cachep);
2410 	STATS_INC_NODEALLOCS(cachep);
2411 
2412 	objp = slabp->s_mem + slabp->free*cachep->objsize;
2413 
2414 	slabp->inuse++;
2415 	next = slab_bufctl(slabp)[slabp->free];
2416 #if DEBUG
2417 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2418 #endif
2419 	slabp->free = next;
2420 	check_slabp(cachep, slabp);
2421 
2422 	/* move slabp to correct slabp list: */
2423 	list_del(&slabp->list);
2424 	if (slabp->free == BUFCTL_END)
2425 		list_add(&slabp->list, &cachep->lists.slabs_full);
2426 	else
2427 		list_add(&slabp->list, &cachep->lists.slabs_partial);
2428 
2429 	list3_data(cachep)->free_objects--;
2430 	spin_unlock_irq(&cachep->spinlock);
2431 
2432 	objp = cache_alloc_debugcheck_after(cachep, GFP_KERNEL, objp,
2433 					__builtin_return_address(0));
2434 	return objp;
2435 }
2436 EXPORT_SYMBOL(kmem_cache_alloc_node);
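/*
 * Sketch of a caller (hypothetical names), placing one control
 * structure on each node, close to the cpus that will use it:
 *
 *	for_each_online_node(nid)
 *		ctl[nid] = kmem_cache_alloc_node(ctl_cachep, nid);
 */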
2437 
2438 #endif
2439 
2440 /**
2441  * kmalloc - allocate memory
2442  * @size: how many bytes of memory are required.
2443  * @flags: the type of memory to allocate.
2444  *
2445  * kmalloc is the normal method of allocating memory
2446  * in the kernel.
2447  *
2448  * The @flags argument may be one of:
2449  *
2450  * %GFP_USER - Allocate memory on behalf of user.  May sleep.
2451  *
2452  * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
2453  *
2454  * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
2455  *
2456  * Additionally, the %GFP_DMA flag may be set to indicate the memory
2457  * must be suitable for DMA.  This can mean different things on different
2458  * platforms.  For example, on i386, it means that the memory must come
2459  * from the first 16MB.
2460  */
2461 void *__kmalloc(size_t size, unsigned int __nocast flags)
2462 {
2463 	kmem_cache_t *cachep;
2464 
2465 	cachep = kmem_find_general_cachep(size, flags);
2466 	if (unlikely(cachep == NULL))
2467 		return NULL;
2468 	return __cache_alloc(cachep, flags);
2469 }
2470 EXPORT_SYMBOL(__kmalloc);
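/*
 * Typical kmalloc/kfree pairing (illustrative):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * In interrupt context, GFP_ATOMIC must be used instead of GFP_KERNEL.
 */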
2471 
2472 #ifdef CONFIG_SMP
2473 /**
2474  * __alloc_percpu - allocate one copy of the object for every possible
2475  * cpu in the system, zeroing them.
2476  * @size: how many bytes of memory are required.
2477  * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
2478  *
2479  * Objects should be dereferenced using the per_cpu_ptr macro only.
2480  */
2481 void *__alloc_percpu(size_t size, size_t align)
2482 {
2483 	int i;
2484 	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
2485 
2486 	if (!pdata)
2487 		return NULL;
2488 
2489 	for (i = 0; i < NR_CPUS; i++) {
2490 		if (!cpu_possible(i))
2491 			continue;
2492 		pdata->ptrs[i] = kmem_cache_alloc_node(
2493 				kmem_find_general_cachep(size, GFP_KERNEL),
2494 				cpu_to_node(i));
2495 
2496 		if (!pdata->ptrs[i])
2497 			goto unwind_oom;
2498 		memset(pdata->ptrs[i], 0, size);
2499 	}
2500 
2501 	/* Catch derefs w/o wrappers */
2502 	return (void *) (~(unsigned long) pdata);
2503 
2504 unwind_oom:
2505 	while (--i >= 0) {
2506 		if (!cpu_possible(i))
2507 			continue;
2508 		kfree(pdata->ptrs[i]);
2509 	}
2510 	kfree(pdata);
2511 	return NULL;
2512 }
2513 EXPORT_SYMBOL(__alloc_percpu);
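/*
 * Sketch of per-cpu usage through the wrappers in <linux/percpu.h>
 * (struct foo_stats is hypothetical):
 *
 *	struct foo_stats *stats = alloc_percpu(struct foo_stats);
 *
 *	per_cpu_ptr(stats, get_cpu())->count++;
 *	put_cpu();
 *	...
 *	free_percpu(stats);
 */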
2514 #endif
2515 
2516 /**
2517  * kmem_cache_free - Deallocate an object
2518  * @cachep: The cache the allocation was from.
2519  * @objp: The previously allocated object.
2520  *
2521  * Free an object which was previously allocated from this
2522  * cache.
2523  */
2524 void kmem_cache_free(kmem_cache_t *cachep, void *objp)
2525 {
2526 	unsigned long flags;
2527 
2528 	local_irq_save(flags);
2529 	__cache_free(cachep, objp);
2530 	local_irq_restore(flags);
2531 }
2532 EXPORT_SYMBOL(kmem_cache_free);
2533 
2534 /**
2535  * kcalloc - allocate memory for an array. The memory is set to zero.
2536  * @n: number of elements.
2537  * @size: element size.
2538  * @flags: the type of memory to allocate.
2539  */
2540 void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
2541 {
2542 	void *ret = NULL;
2543 
2544 	if (n != 0 && size > INT_MAX / n)
2545 		return ret;
2546 
2547 	ret = kmalloc(n * size, flags);
2548 	if (ret)
2549 		memset(ret, 0, n * size);
2550 	return ret;
2551 }
2552 EXPORT_SYMBOL(kcalloc);
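/*
 * Illustrative use - thanks to the overflow check above, an oversized
 * element count yields NULL rather than a short buffer:
 *
 *	struct entry *tab = kcalloc(nr_entries, sizeof(*tab), GFP_KERNEL);
 *	if (!tab)
 *		return -ENOMEM;
 */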
2553 
2554 /**
2555  * kfree - free previously allocated memory
2556  * @objp: pointer returned by kmalloc.
2557  *
2558  * Don't free memory not originally allocated by kmalloc()
2559  * or you will run into trouble.
2560  */
2561 void kfree(const void *objp)
2562 {
2563 	kmem_cache_t *c;
2564 	unsigned long flags;
2565 
2566 	if (unlikely(!objp))
2567 		return;
2568 	local_irq_save(flags);
2569 	kfree_debugcheck(objp);
2570 	c = GET_PAGE_CACHE(virt_to_page(objp));
2571 	__cache_free(c, (void*)objp);
2572 	local_irq_restore(flags);
2573 }
2574 EXPORT_SYMBOL(kfree);
2575 
2576 #ifdef CONFIG_SMP
2577 /**
2578  * free_percpu - free previously allocated percpu memory
2579  * @objp: pointer returned by alloc_percpu.
2580  *
2581  * Don't free memory not originally allocated by alloc_percpu().
2582  * The complemented objp is used to check for that.
2583  */
2584 void
2585 free_percpu(const void *objp)
2586 {
2587 	int i;
2588 	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
2589 
2590 	for (i = 0; i < NR_CPUS; i++) {
2591 		if (!cpu_possible(i))
2592 			continue;
2593 		kfree(p->ptrs[i]);
2594 	}
2595 	kfree(p);
2596 }
2597 EXPORT_SYMBOL(free_percpu);
2598 #endif
2599 
2600 unsigned int kmem_cache_size(kmem_cache_t *cachep)
2601 {
2602 	return obj_reallen(cachep);
2603 }
2604 EXPORT_SYMBOL(kmem_cache_size);
2605 
2606 struct ccupdate_struct {
2607 	kmem_cache_t *cachep;
2608 	struct array_cache *new[NR_CPUS];
2609 };
2610 
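/*
 * Runs on every cpu via smp_call_function_all_cpus(): with interrupts
 * off, swap the cache's per-cpu array pointer with the preallocated
 * replacement, handing the old array back through new->new[] so that
 * do_tune_cpucache() can drain and kfree() it.
 */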
2611 static void do_ccupdate_local(void *info)
2612 {
2613 	struct ccupdate_struct *new = (struct ccupdate_struct *)info;
2614 	struct array_cache *old;
2615 
2616 	check_irq_off();
2617 	old = ac_data(new->cachep);
2618 
2619 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
2620 	new->new[smp_processor_id()] = old;
2621 }
2622 
2623 
2624 static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
2625 				int shared)
2626 {
2627 	struct ccupdate_struct new;
2628 	struct array_cache *new_shared;
2629 	int i;
2630 
2631 	memset(&new.new,0,sizeof(new.new));
2632 	for (i = 0; i < NR_CPUS; i++) {
2633 		if (cpu_online(i)) {
2634 			new.new[i] = alloc_arraycache(i, limit, batchcount);
2635 			if (!new.new[i]) {
2636 				for (i--; i >= 0; i--) kfree(new.new[i]);
2637 				return -ENOMEM;
2638 			}
2639 		} else {
2640 			new.new[i] = NULL;
2641 		}
2642 	}
2643 	new.cachep = cachep;
2644 
2645 	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
2646 
2647 	check_irq_on();
2648 	spin_lock_irq(&cachep->spinlock);
2649 	cachep->batchcount = batchcount;
2650 	cachep->limit = limit;
2651 	cachep->free_limit = (1+num_online_cpus())*cachep->batchcount + cachep->num;
2652 	spin_unlock_irq(&cachep->spinlock);
2653 
2654 	for (i = 0; i < NR_CPUS; i++) {
2655 		struct array_cache *ccold = new.new[i];
2656 		if (!ccold)
2657 			continue;
2658 		spin_lock_irq(&cachep->spinlock);
2659 		free_block(cachep, ac_entry(ccold), ccold->avail);
2660 		spin_unlock_irq(&cachep->spinlock);
2661 		kfree(ccold);
2662 	}
2663 	new_shared = alloc_arraycache(-1, batchcount*shared, 0xbaadf00d);
2664 	if (new_shared) {
2665 		struct array_cache *old;
2666 
2667 		spin_lock_irq(&cachep->spinlock);
2668 		old = cachep->lists.shared;
2669 		cachep->lists.shared = new_shared;
2670 		if (old)
2671 			free_block(cachep, ac_entry(old), old->avail);
2672 		spin_unlock_irq(&cachep->spinlock);
2673 		kfree(old);
2674 	}
2675 
2676 	return 0;
2677 }
2678 
2679 
2680 static void enable_cpucache(kmem_cache_t *cachep)
2681 {
2682 	int err;
2683 	int limit, shared;
2684 
2685 	/* The head array serves three purposes:
2686 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
2687 	 * - reduce the number of spinlock operations.
2688 	 * - reduce the number of linked list operations on the slab and
2689 	 *   bufctl chains: array operations are cheaper.
2690 	 * The numbers are guessed, we should auto-tune as described by
2691 	 * Bonwick.
2692 	 */
2693 	if (cachep->objsize > 131072)
2694 		limit = 1;
2695 	else if (cachep->objsize > PAGE_SIZE)
2696 		limit = 8;
2697 	else if (cachep->objsize > 1024)
2698 		limit = 24;
2699 	else if (cachep->objsize > 256)
2700 		limit = 54;
2701 	else
2702 		limit = 120;
2703 
2704 	/* Cpu-bound tasks (e.g. network routing) can exhibit lopsided
2705 	 * allocation behaviour: most allocs on one cpu, most free operations
2706 	 * on another cpu. For these cases, efficient object passing between
2707 	 * cpus is necessary. This is provided by a shared array, which
2708 	 * replaces Bonwick's magazine layer.
2709 	 * On uniprocessor, it's functionally equivalent (but less efficient)
2710 	 * to a larger limit. Thus disabled by default.
2711 	 */
2712 	shared = 0;
2713 #ifdef CONFIG_SMP
2714 	if (cachep->objsize <= PAGE_SIZE)
2715 		shared = 8;
2716 #endif
2717 
2718 #if DEBUG
2719 	/* With debugging enabled, a large batchcount leads to excessively
2720 	 * long periods with disabled local interrupts. Limit the
2721 	 * batchcount.
2722 	 */
2723 	if (limit > 32)
2724 		limit = 32;
2725 #endif
2726 	err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
2727 	if (err)
2728 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
2729 					cachep->name, -err);
2730 }
2731 
2732 static void drain_array_locked(kmem_cache_t *cachep,
2733 				struct array_cache *ac, int force)
2734 {
2735 	int tofree;
2736 
2737 	check_spinlock_acquired(cachep);
2738 	if (ac->touched && !force) {
2739 		ac->touched = 0;
2740 	} else if (ac->avail) {
2741 		tofree = force ? ac->avail : (ac->limit+4)/5;
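		/*
		 * Without force, free roughly a fifth of the array limit
		 * per pass (at most half of what is currently available),
		 * so idle caches drain gradually rather than all at once.
		 */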
2742 		if (tofree > ac->avail) {
2743 			tofree = (ac->avail+1)/2;
2744 		}
2745 		free_block(cachep, ac_entry(ac), tofree);
2746 		ac->avail -= tofree;
2747 		memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
2748 					sizeof(void*)*ac->avail);
2749 	}
2750 }
2751 
2752 /**
2753  * cache_reap - Reclaim memory from caches.
2754  *
2755  * Called from workqueue/eventd every few seconds.
2756  * Purpose:
2757  * - clear the per-cpu caches for this CPU.
2758  * - return freeable pages to the main free memory pool.
2759  *
2760  * If we cannot acquire the cache chain semaphore then just give up - we'll
2761  * try again on the next iteration.
2762  */
2763 static void cache_reap(void *unused)
2764 {
2765 	struct list_head *walk;
2766 
2767 	if (down_trylock(&cache_chain_sem)) {
2768 		/* Give up. Setup the next iteration. */
2769 		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
2770 		return;
2771 	}
2772 
2773 	list_for_each(walk, &cache_chain) {
2774 		kmem_cache_t *searchp;
2775 		struct list_head* p;
2776 		int tofree;
2777 		struct slab *slabp;
2778 
2779 		searchp = list_entry(walk, kmem_cache_t, next);
2780 
2781 		if (searchp->flags & SLAB_NO_REAP)
2782 			goto next;
2783 
2784 		check_irq_on();
2785 
2786 		spin_lock_irq(&searchp->spinlock);
2787 
2788 		drain_array_locked(searchp, ac_data(searchp), 0);
2789 
2790 		if (time_after(searchp->lists.next_reap, jiffies))
2791 			goto next_unlock;
2792 
2793 		searchp->lists.next_reap = jiffies + REAPTIMEOUT_LIST3;
2794 
2795 		if (searchp->lists.shared)
2796 			drain_array_locked(searchp, searchp->lists.shared, 0);
2797 
2798 		if (searchp->lists.free_touched) {
2799 			searchp->lists.free_touched = 0;
2800 			goto next_unlock;
2801 		}
2802 
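		/* Per pass, free at most about a fifth of free_limit:
		 * tofree is free_limit/(5*num) rounded up, i.e. that
		 * many objects expressed in whole slabs.
		 */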
2803 		tofree = (searchp->free_limit+5*searchp->num-1)/(5*searchp->num);
2804 		do {
2805 			p = list3_data(searchp)->slabs_free.next;
2806 			if (p == &(list3_data(searchp)->slabs_free))
2807 				break;
2808 
2809 			slabp = list_entry(p, struct slab, list);
2810 			BUG_ON(slabp->inuse);
2811 			list_del(&slabp->list);
2812 			STATS_INC_REAPED(searchp);
2813 
2814 			/* Safe to drop the lock. The slab is no longer
2815 			 * linked to the cache.
2816 			 * searchp cannot disappear, we hold
2817 			 * cache_chain_sem.
2818 			 */
2819 			searchp->lists.free_objects -= searchp->num;
2820 			spin_unlock_irq(&searchp->spinlock);
2821 			slab_destroy(searchp, slabp);
2822 			spin_lock_irq(&searchp->spinlock);
2823 		} while(--tofree > 0);
2824 next_unlock:
2825 		spin_unlock_irq(&searchp->spinlock);
2826 next:
2827 		cond_resched();
2828 	}
2829 	check_irq_on();
2830 	up(&cache_chain_sem);
2831 	/* Setup the next iteration */
2832 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
2833 }
2834 
2835 #ifdef CONFIG_PROC_FS
2836 
2837 static void *s_start(struct seq_file *m, loff_t *pos)
2838 {
2839 	loff_t n = *pos;
2840 	struct list_head *p;
2841 
2842 	down(&cache_chain_sem);
2843 	if (!n) {
2844 		/*
2845 		 * Output format version, so at least we can change it
2846 		 * without _too_ many complaints.
2847 		 */
2848 #if STATS
2849 		seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
2850 #else
2851 		seq_puts(m, "slabinfo - version: 2.1\n");
2852 #endif
2853 		seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
2854 		seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
2855 		seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
2856 #if STATS
2857 		seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
2858 				" <error> <maxfreeable> <freelimit> <nodeallocs>");
2859 		seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
2860 #endif
2861 		seq_putc(m, '\n');
2862 	}
2863 	p = cache_chain.next;
2864 	while (n--) {
2865 		p = p->next;
2866 		if (p == &cache_chain)
2867 			return NULL;
2868 	}
2869 	return list_entry(p, kmem_cache_t, next);
2870 }
2871 
2872 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2873 {
2874 	kmem_cache_t *cachep = p;
2875 	++*pos;
2876 	return cachep->next.next == &cache_chain ? NULL
2877 		: list_entry(cachep->next.next, kmem_cache_t, next);
2878 }
2879 
2880 static void s_stop(struct seq_file *m, void *p)
2881 {
2882 	up(&cache_chain_sem);
2883 }
2884 
2885 static int s_show(struct seq_file *m, void *p)
2886 {
2887 	kmem_cache_t *cachep = p;
2888 	struct list_head *q;
2889 	struct slab	*slabp;
2890 	unsigned long	active_objs;
2891 	unsigned long	num_objs;
2892 	unsigned long	active_slabs = 0;
2893 	unsigned long	num_slabs;
2894 	const char *name;
2895 	char *error = NULL;
2896 
2897 	check_irq_on();
2898 	spin_lock_irq(&cachep->spinlock);
2899 	active_objs = 0;
2900 	num_slabs = 0;
2901 	list_for_each(q,&cachep->lists.slabs_full) {
2902 		slabp = list_entry(q, struct slab, list);
2903 		if (slabp->inuse != cachep->num && !error)
2904 			error = "slabs_full accounting error";
2905 		active_objs += cachep->num;
2906 		active_slabs++;
2907 	}
2908 	list_for_each(q,&cachep->lists.slabs_partial) {
2909 		slabp = list_entry(q, struct slab, list);
2910 		if (slabp->inuse == cachep->num && !error)
2911 			error = "slabs_partial inuse accounting error";
2912 		if (!slabp->inuse && !error)
2913 			error = "slabs_partial/inuse accounting error";
2914 		active_objs += slabp->inuse;
2915 		active_slabs++;
2916 	}
2917 	list_for_each(q,&cachep->lists.slabs_free) {
2918 		slabp = list_entry(q, struct slab, list);
2919 		if (slabp->inuse && !error)
2920 			error = "slabs_free/inuse accounting error";
2921 		num_slabs++;
2922 	}
2923 	num_slabs+=active_slabs;
2924 	num_objs = num_slabs*cachep->num;
2925 	if (num_objs - active_objs != cachep->lists.free_objects && !error)
2926 		error = "free_objects accounting error";
2927 
2928 	name = cachep->name;
2929 	if (error)
2930 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
2931 
2932 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
2933 		name, active_objs, num_objs, cachep->objsize,
2934 		cachep->num, (1<<cachep->gfporder));
2935 	seq_printf(m, " : tunables %4u %4u %4u",
2936 			cachep->limit, cachep->batchcount,
2937 			cachep->lists.shared->limit/cachep->batchcount);
2938 	seq_printf(m, " : slabdata %6lu %6lu %6u",
2939 			active_slabs, num_slabs, cachep->lists.shared->avail);
2940 #if STATS
2941 	{	/* list3 stats */
2942 		unsigned long high = cachep->high_mark;
2943 		unsigned long allocs = cachep->num_allocations;
2944 		unsigned long grown = cachep->grown;
2945 		unsigned long reaped = cachep->reaped;
2946 		unsigned long errors = cachep->errors;
2947 		unsigned long max_freeable = cachep->max_freeable;
2948 		unsigned long free_limit = cachep->free_limit;
2949 		unsigned long node_allocs = cachep->node_allocs;
2950 
2951 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu",
2952 				allocs, high, grown, reaped, errors,
2953 				max_freeable, free_limit, node_allocs);
2954 	}
2955 	/* cpu stats */
2956 	{
2957 		unsigned long allochit = atomic_read(&cachep->allochit);
2958 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
2959 		unsigned long freehit = atomic_read(&cachep->freehit);
2960 		unsigned long freemiss = atomic_read(&cachep->freemiss);
2961 
2962 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
2963 			allochit, allocmiss, freehit, freemiss);
2964 	}
2965 #endif
2966 	seq_putc(m, '\n');
2967 	spin_unlock_irq(&cachep->spinlock);
2968 	return 0;
2969 }
2970 
2971 /*
2972  * slabinfo_op - iterator that generates /proc/slabinfo
2973  *
2974  * Output layout:
2975  * cache-name
2976  * num-active-objs
2977  * total-objs
2978  * object size
2979  * num-objs-per-slab
2980  * num-pages-per-slab
2981  * + tunables and slabdata fields
2982  * + further values with statistics enabled
2983  */
2984 
2985 struct seq_operations slabinfo_op = {
2986 	.start	= s_start,
2987 	.next	= s_next,
2988 	.stop	= s_stop,
2989 	.show	= s_show,
2990 };
2991 
2992 #define MAX_SLABINFO_WRITE 128
2993 /**
2994  * slabinfo_write - Tuning for the slab allocator
2995  * @file: unused
2996  * @buffer: user buffer
2997  * @count: data length
2998  * @ppos: unused
2999  */
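/*
 * Illustrative tuning command (cache name and numbers are examples
 * only); this sets <limit> <batchcount> <sharedfactor> for one cache
 * via do_tune_cpucache():
 *
 *	echo "dentry_cache 128 64 8" > /proc/slabinfo
 */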
3000 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
3001 				size_t count, loff_t *ppos)
3002 {
3003 	char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
3004 	int limit, batchcount, shared, res;
3005 	struct list_head *p;
3006 
3007 	if (count > MAX_SLABINFO_WRITE)
3008 		return -EINVAL;
3009 	if (copy_from_user(&kbuf, buffer, count))
3010 		return -EFAULT;
3011 	kbuf[MAX_SLABINFO_WRITE] = '\0';
3012 
3013 	tmp = strchr(kbuf, ' ');
3014 	if (!tmp)
3015 		return -EINVAL;
3016 	*tmp = '\0';
3017 	tmp++;
3018 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
3019 		return -EINVAL;
3020 
3021 	/* Find the cache in the chain of caches. */
3022 	down(&cache_chain_sem);
3023 	res = -EINVAL;
3024 	list_for_each(p,&cache_chain) {
3025 		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
3026 
3027 		if (!strcmp(cachep->name, kbuf)) {
3028 			if (limit < 1 ||
3029 			    batchcount < 1 ||
3030 			    batchcount > limit ||
3031 			    shared < 0) {
3032 				res = -EINVAL;
3033 			} else {
3034 				res = do_tune_cpucache(cachep, limit, batchcount, shared);
3035 			}
3036 			break;
3037 		}
3038 	}
3039 	up(&cache_chain_sem);
3040 	if (res >= 0)
3041 		res = count;
3042 	return res;
3043 }
3044 #endif
3045 
3046 unsigned int ksize(const void *objp)
3047 {
3048 	kmem_cache_t *c;
3049 	unsigned long flags;
3050 	unsigned int size = 0;
3051 
3052 	if (likely(objp != NULL)) {
3053 		local_irq_save(flags);
3054 		c = GET_PAGE_CACHE(virt_to_page(objp));
3055 		size = kmem_cache_size(c);
3056 		local_irq_restore(flags);
3057 	}
3058 
3059 	return size;
3060 }
3061