xref: /openbmc/linux/mm/slab.c (revision ee89bd6b)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists out of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means, that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise from empty slabs or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with enabled local interrupts -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'slab_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<net/sock.h>
121 
122 #include	<asm/cacheflush.h>
123 #include	<asm/tlbflush.h>
124 #include	<asm/page.h>
125 
126 #include <trace/events/kmem.h>
127 
128 #include	"internal.h"
129 
130 #include	"slab.h"
131 
132 /*
133  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
134  *		  0 for faster, smaller code (especially in the critical paths).
135  *
136  * STATS	- 1 to collect stats for /proc/slabinfo.
137  *		  0 for faster, smaller code (especially in the critical paths).
138  *
139  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140  */
141 
142 #ifdef CONFIG_DEBUG_SLAB
143 #define	DEBUG		1
144 #define	STATS		1
145 #define	FORCED_DEBUG	1
146 #else
147 #define	DEBUG		0
148 #define	STATS		0
149 #define	FORCED_DEBUG	0
150 #endif
151 
152 /* Shouldn't this be in a header file somewhere? */
153 #define	BYTES_PER_WORD		sizeof(void *)
154 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
155 
156 #ifndef ARCH_KMALLOC_FLAGS
157 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158 #endif
159 
160 /*
161  * true if a page was allocated from pfmemalloc reserves for network-based
162  * swap
163  */
164 static bool pfmemalloc_active __read_mostly;
165 
166 /*
167  * kmem_bufctl_t:
168  *
169  * Bufctl's are used for linking objs within a slab
170  * linked offsets.
171  *
172  * This implementation relies on "struct page" for locating the cache &
173  * slab an object belongs to.
174  * This allows the bufctl structure to be small (one int), but limits
175  * the number of objects a slab (not a cache) can contain when off-slab
176  * bufctls are used. The limit is the size of the largest general cache
177  * that does not use off-slab slabs.
178  * For 32bit archs with 4 kB pages, is this 56.
179  * This is not serious, as it is only for large objects, when it is unwise
180  * to have too many per slab.
181  * Note: This limit can be raised by introducing a general cache whose size
182  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
183  */
184 
185 typedef unsigned int kmem_bufctl_t;
186 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
187 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
188 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
189 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
190 
191 /*
192  * struct slab_rcu
193  *
194  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
195  * arrange for kmem_freepages to be called via RCU.  This is useful if
196  * we need to approach a kernel structure obliquely, from its address
197  * obtained without the usual locking.  We can lock the structure to
198  * stabilize it and check it's still at the given address, only if we
199  * can be sure that the memory has not been meanwhile reused for some
200  * other kind of object (which our subsystem's lock might corrupt).
201  *
202  * rcu_read_lock before reading the address, then rcu_read_unlock after
203  * taking the spinlock within the structure expected at that address.
204  */
205 struct slab_rcu {
206 	struct rcu_head head;
207 	struct kmem_cache *cachep;
208 	void *addr;
209 };
210 
211 /*
212  * struct slab
213  *
214  * Manages the objs in a slab. Placed either at the beginning of mem allocated
215  * for a slab, or allocated from an general cache.
216  * Slabs are chained into three list: fully used, partial, fully free slabs.
217  */
218 struct slab {
219 	union {
220 		struct {
221 			struct list_head list;
222 			unsigned long colouroff;
223 			void *s_mem;		/* including colour offset */
224 			unsigned int inuse;	/* num of objs active in slab */
225 			kmem_bufctl_t free;
226 			unsigned short nodeid;
227 		};
228 		struct slab_rcu __slab_cover_slab_rcu;
229 	};
230 };
231 
232 /*
233  * struct array_cache
234  *
235  * Purpose:
236  * - LIFO ordering, to hand out cache-warm objects from _alloc
237  * - reduce the number of linked list operations
238  * - reduce spinlock operations
239  *
240  * The limit is stored in the per-cpu structure to reduce the data cache
241  * footprint.
242  *
243  */
244 struct array_cache {
245 	unsigned int avail;
246 	unsigned int limit;
247 	unsigned int batchcount;
248 	unsigned int touched;
249 	spinlock_t lock;
250 	void *entry[];	/*
251 			 * Must have this definition in here for the proper
252 			 * alignment of array_cache. Also simplifies accessing
253 			 * the entries.
254 			 *
255 			 * Entries should not be directly dereferenced as
256 			 * entries belonging to slabs marked pfmemalloc will
257 			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
258 			 */
259 };
260 
261 #define SLAB_OBJ_PFMEMALLOC	1
262 static inline bool is_obj_pfmemalloc(void *objp)
263 {
264 	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
265 }
266 
267 static inline void set_obj_pfmemalloc(void **objp)
268 {
269 	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
270 	return;
271 }
272 
273 static inline void clear_obj_pfmemalloc(void **objp)
274 {
275 	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
276 }
277 
278 /*
279  * bootstrap: The caches do not work without cpuarrays anymore, but the
280  * cpuarrays are allocated from the generic caches...
281  */
282 #define BOOT_CPUCACHE_ENTRIES	1
283 struct arraycache_init {
284 	struct array_cache cache;
285 	void *entries[BOOT_CPUCACHE_ENTRIES];
286 };
287 
288 /*
289  * Need this for bootstrapping a per node allocator.
290  */
291 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
292 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
293 #define	CACHE_CACHE 0
294 #define	SIZE_AC MAX_NUMNODES
295 #define	SIZE_NODE (2 * MAX_NUMNODES)
296 
297 static int drain_freelist(struct kmem_cache *cache,
298 			struct kmem_cache_node *n, int tofree);
299 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
300 			int node);
301 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
302 static void cache_reap(struct work_struct *unused);
303 
304 static int slab_early_init = 1;
305 
306 #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
307 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
308 
309 static void kmem_cache_node_init(struct kmem_cache_node *parent)
310 {
311 	INIT_LIST_HEAD(&parent->slabs_full);
312 	INIT_LIST_HEAD(&parent->slabs_partial);
313 	INIT_LIST_HEAD(&parent->slabs_free);
314 	parent->shared = NULL;
315 	parent->alien = NULL;
316 	parent->colour_next = 0;
317 	spin_lock_init(&parent->list_lock);
318 	parent->free_objects = 0;
319 	parent->free_touched = 0;
320 }
321 
322 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
323 	do {								\
324 		INIT_LIST_HEAD(listp);					\
325 		list_splice(&(cachep->node[nodeid]->slab), listp);	\
326 	} while (0)
327 
328 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
329 	do {								\
330 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
331 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
332 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
333 	} while (0)
334 
335 #define CFLGS_OFF_SLAB		(0x80000000UL)
336 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
337 
338 #define BATCHREFILL_LIMIT	16
339 /*
340  * Optimization question: fewer reaps means less probability for unnessary
341  * cpucache drain/refill cycles.
342  *
343  * OTOH the cpuarrays can contain lots of objects,
344  * which could lock up otherwise freeable slabs.
345  */
346 #define REAPTIMEOUT_CPUC	(2*HZ)
347 #define REAPTIMEOUT_LIST3	(4*HZ)
348 
349 #if STATS
350 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
351 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
352 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
353 #define	STATS_INC_GROWN(x)	((x)->grown++)
354 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
355 #define	STATS_SET_HIGH(x)						\
356 	do {								\
357 		if ((x)->num_active > (x)->high_mark)			\
358 			(x)->high_mark = (x)->num_active;		\
359 	} while (0)
360 #define	STATS_INC_ERR(x)	((x)->errors++)
361 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
362 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
363 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
364 #define	STATS_SET_FREEABLE(x, i)					\
365 	do {								\
366 		if ((x)->max_freeable < i)				\
367 			(x)->max_freeable = i;				\
368 	} while (0)
369 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
370 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
371 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
372 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
373 #else
374 #define	STATS_INC_ACTIVE(x)	do { } while (0)
375 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
376 #define	STATS_INC_ALLOCED(x)	do { } while (0)
377 #define	STATS_INC_GROWN(x)	do { } while (0)
378 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
379 #define	STATS_SET_HIGH(x)	do { } while (0)
380 #define	STATS_INC_ERR(x)	do { } while (0)
381 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
382 #define	STATS_INC_NODEFREES(x)	do { } while (0)
383 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
384 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
385 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
386 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
387 #define STATS_INC_FREEHIT(x)	do { } while (0)
388 #define STATS_INC_FREEMISS(x)	do { } while (0)
389 #endif
390 
391 #if DEBUG
392 
393 /*
394  * memory layout of objects:
395  * 0		: objp
396  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
397  * 		the end of an object is aligned with the end of the real
398  * 		allocation. Catches writes behind the end of the allocation.
399  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
400  * 		redzone word.
401  * cachep->obj_offset: The real object.
402  * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
403  * cachep->size - 1* BYTES_PER_WORD: last caller address
404  *					[BYTES_PER_WORD long]
405  */
406 static int obj_offset(struct kmem_cache *cachep)
407 {
408 	return cachep->obj_offset;
409 }
410 
411 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
412 {
413 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
414 	return (unsigned long long*) (objp + obj_offset(cachep) -
415 				      sizeof(unsigned long long));
416 }
417 
418 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
419 {
420 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
421 	if (cachep->flags & SLAB_STORE_USER)
422 		return (unsigned long long *)(objp + cachep->size -
423 					      sizeof(unsigned long long) -
424 					      REDZONE_ALIGN);
425 	return (unsigned long long *) (objp + cachep->size -
426 				       sizeof(unsigned long long));
427 }
428 
429 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
430 {
431 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
432 	return (void **)(objp + cachep->size - BYTES_PER_WORD);
433 }
434 
435 #else
436 
437 #define obj_offset(x)			0
438 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
439 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
440 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
441 
442 #endif
443 
444 /*
445  * Do not go above this order unless 0 objects fit into the slab or
446  * overridden on the command line.
447  */
448 #define	SLAB_MAX_ORDER_HI	1
449 #define	SLAB_MAX_ORDER_LO	0
450 static int slab_max_order = SLAB_MAX_ORDER_LO;
451 static bool slab_max_order_set __initdata;
452 
453 static inline struct kmem_cache *virt_to_cache(const void *obj)
454 {
455 	struct page *page = virt_to_head_page(obj);
456 	return page->slab_cache;
457 }
458 
459 static inline struct slab *virt_to_slab(const void *obj)
460 {
461 	struct page *page = virt_to_head_page(obj);
462 
463 	VM_BUG_ON(!PageSlab(page));
464 	return page->slab_page;
465 }
466 
467 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
468 				 unsigned int idx)
469 {
470 	return slab->s_mem + cache->size * idx;
471 }
472 
473 /*
474  * We want to avoid an expensive divide : (offset / cache->size)
475  *   Using the fact that size is a constant for a particular cache,
476  *   we can replace (offset / cache->size) by
477  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
478  */
479 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
480 					const struct slab *slab, void *obj)
481 {
482 	u32 offset = (obj - slab->s_mem);
483 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
484 }
485 
486 static struct arraycache_init initarray_generic =
487     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
488 
489 /* internal cache of cache description objs */
490 static struct kmem_cache kmem_cache_boot = {
491 	.batchcount = 1,
492 	.limit = BOOT_CPUCACHE_ENTRIES,
493 	.shared = 1,
494 	.size = sizeof(struct kmem_cache),
495 	.name = "kmem_cache",
496 };
497 
498 #define BAD_ALIEN_MAGIC 0x01020304ul
499 
500 #ifdef CONFIG_LOCKDEP
501 
502 /*
503  * Slab sometimes uses the kmalloc slabs to store the slab headers
504  * for other slabs "off slab".
505  * The locking for this is tricky in that it nests within the locks
506  * of all other slabs in a few places; to deal with this special
507  * locking we put on-slab caches into a separate lock-class.
508  *
509  * We set lock class for alien array caches which are up during init.
510  * The lock annotation will be lost if all cpus of a node goes down and
511  * then comes back up during hotplug
512  */
513 static struct lock_class_key on_slab_l3_key;
514 static struct lock_class_key on_slab_alc_key;
515 
516 static struct lock_class_key debugobj_l3_key;
517 static struct lock_class_key debugobj_alc_key;
518 
519 static void slab_set_lock_classes(struct kmem_cache *cachep,
520 		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
521 		int q)
522 {
523 	struct array_cache **alc;
524 	struct kmem_cache_node *n;
525 	int r;
526 
527 	n = cachep->node[q];
528 	if (!n)
529 		return;
530 
531 	lockdep_set_class(&n->list_lock, l3_key);
532 	alc = n->alien;
533 	/*
534 	 * FIXME: This check for BAD_ALIEN_MAGIC
535 	 * should go away when common slab code is taught to
536 	 * work even without alien caches.
537 	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
538 	 * for alloc_alien_cache,
539 	 */
540 	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
541 		return;
542 	for_each_node(r) {
543 		if (alc[r])
544 			lockdep_set_class(&alc[r]->lock, alc_key);
545 	}
546 }
547 
548 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
549 {
550 	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
551 }
552 
553 static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
554 {
555 	int node;
556 
557 	for_each_online_node(node)
558 		slab_set_debugobj_lock_classes_node(cachep, node);
559 }
560 
561 static void init_node_lock_keys(int q)
562 {
563 	int i;
564 
565 	if (slab_state < UP)
566 		return;
567 
568 	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
569 		struct kmem_cache_node *n;
570 		struct kmem_cache *cache = kmalloc_caches[i];
571 
572 		if (!cache)
573 			continue;
574 
575 		n = cache->node[q];
576 		if (!n || OFF_SLAB(cache))
577 			continue;
578 
579 		slab_set_lock_classes(cache, &on_slab_l3_key,
580 				&on_slab_alc_key, q);
581 	}
582 }
583 
584 static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
585 {
586 	if (!cachep->node[q])
587 		return;
588 
589 	slab_set_lock_classes(cachep, &on_slab_l3_key,
590 			&on_slab_alc_key, q);
591 }
592 
593 static inline void on_slab_lock_classes(struct kmem_cache *cachep)
594 {
595 	int node;
596 
597 	VM_BUG_ON(OFF_SLAB(cachep));
598 	for_each_node(node)
599 		on_slab_lock_classes_node(cachep, node);
600 }
601 
602 static inline void init_lock_keys(void)
603 {
604 	int node;
605 
606 	for_each_node(node)
607 		init_node_lock_keys(node);
608 }
609 #else
610 static void init_node_lock_keys(int q)
611 {
612 }
613 
614 static inline void init_lock_keys(void)
615 {
616 }
617 
618 static inline void on_slab_lock_classes(struct kmem_cache *cachep)
619 {
620 }
621 
622 static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
623 {
624 }
625 
626 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
627 {
628 }
629 
630 static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
631 {
632 }
633 #endif
634 
635 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
636 
637 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
638 {
639 	return cachep->array[smp_processor_id()];
640 }
641 
642 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
643 {
644 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
645 }
646 
647 /*
648  * Calculate the number of objects and left-over bytes for a given buffer size.
649  */
650 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
651 			   size_t align, int flags, size_t *left_over,
652 			   unsigned int *num)
653 {
654 	int nr_objs;
655 	size_t mgmt_size;
656 	size_t slab_size = PAGE_SIZE << gfporder;
657 
658 	/*
659 	 * The slab management structure can be either off the slab or
660 	 * on it. For the latter case, the memory allocated for a
661 	 * slab is used for:
662 	 *
663 	 * - The struct slab
664 	 * - One kmem_bufctl_t for each object
665 	 * - Padding to respect alignment of @align
666 	 * - @buffer_size bytes for each object
667 	 *
668 	 * If the slab management structure is off the slab, then the
669 	 * alignment will already be calculated into the size. Because
670 	 * the slabs are all pages aligned, the objects will be at the
671 	 * correct alignment when allocated.
672 	 */
673 	if (flags & CFLGS_OFF_SLAB) {
674 		mgmt_size = 0;
675 		nr_objs = slab_size / buffer_size;
676 
677 		if (nr_objs > SLAB_LIMIT)
678 			nr_objs = SLAB_LIMIT;
679 	} else {
680 		/*
681 		 * Ignore padding for the initial guess. The padding
682 		 * is at most @align-1 bytes, and @buffer_size is at
683 		 * least @align. In the worst case, this result will
684 		 * be one greater than the number of objects that fit
685 		 * into the memory allocation when taking the padding
686 		 * into account.
687 		 */
688 		nr_objs = (slab_size - sizeof(struct slab)) /
689 			  (buffer_size + sizeof(kmem_bufctl_t));
690 
691 		/*
692 		 * This calculated number will be either the right
693 		 * amount, or one greater than what we want.
694 		 */
695 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
696 		       > slab_size)
697 			nr_objs--;
698 
699 		if (nr_objs > SLAB_LIMIT)
700 			nr_objs = SLAB_LIMIT;
701 
702 		mgmt_size = slab_mgmt_size(nr_objs, align);
703 	}
704 	*num = nr_objs;
705 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
706 }
707 
708 #if DEBUG
709 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
710 
711 static void __slab_error(const char *function, struct kmem_cache *cachep,
712 			char *msg)
713 {
714 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
715 	       function, cachep->name, msg);
716 	dump_stack();
717 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
718 }
719 #endif
720 
721 /*
722  * By default on NUMA we use alien caches to stage the freeing of
723  * objects allocated from other nodes. This causes massive memory
724  * inefficiencies when using fake NUMA setup to split memory into a
725  * large number of small nodes, so it can be disabled on the command
726  * line
727   */
728 
729 static int use_alien_caches __read_mostly = 1;
730 static int __init noaliencache_setup(char *s)
731 {
732 	use_alien_caches = 0;
733 	return 1;
734 }
735 __setup("noaliencache", noaliencache_setup);
736 
737 static int __init slab_max_order_setup(char *str)
738 {
739 	get_option(&str, &slab_max_order);
740 	slab_max_order = slab_max_order < 0 ? 0 :
741 				min(slab_max_order, MAX_ORDER - 1);
742 	slab_max_order_set = true;
743 
744 	return 1;
745 }
746 __setup("slab_max_order=", slab_max_order_setup);
747 
748 #ifdef CONFIG_NUMA
749 /*
750  * Special reaping functions for NUMA systems called from cache_reap().
751  * These take care of doing round robin flushing of alien caches (containing
752  * objects freed on different nodes from which they were allocated) and the
753  * flushing of remote pcps by calling drain_node_pages.
754  */
755 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
756 
757 static void init_reap_node(int cpu)
758 {
759 	int node;
760 
761 	node = next_node(cpu_to_mem(cpu), node_online_map);
762 	if (node == MAX_NUMNODES)
763 		node = first_node(node_online_map);
764 
765 	per_cpu(slab_reap_node, cpu) = node;
766 }
767 
768 static void next_reap_node(void)
769 {
770 	int node = __this_cpu_read(slab_reap_node);
771 
772 	node = next_node(node, node_online_map);
773 	if (unlikely(node >= MAX_NUMNODES))
774 		node = first_node(node_online_map);
775 	__this_cpu_write(slab_reap_node, node);
776 }
777 
778 #else
779 #define init_reap_node(cpu) do { } while (0)
780 #define next_reap_node(void) do { } while (0)
781 #endif
782 
783 /*
784  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
785  * via the workqueue/eventd.
786  * Add the CPU number into the expiration time to minimize the possibility of
787  * the CPUs getting into lockstep and contending for the global cache chain
788  * lock.
789  */
790 static void __cpuinit start_cpu_timer(int cpu)
791 {
792 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
793 
794 	/*
795 	 * When this gets called from do_initcalls via cpucache_init(),
796 	 * init_workqueues() has already run, so keventd will be setup
797 	 * at that time.
798 	 */
799 	if (keventd_up() && reap_work->work.func == NULL) {
800 		init_reap_node(cpu);
801 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
802 		schedule_delayed_work_on(cpu, reap_work,
803 					__round_jiffies_relative(HZ, cpu));
804 	}
805 }
806 
807 static struct array_cache *alloc_arraycache(int node, int entries,
808 					    int batchcount, gfp_t gfp)
809 {
810 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
811 	struct array_cache *nc = NULL;
812 
813 	nc = kmalloc_node(memsize, gfp, node);
814 	/*
815 	 * The array_cache structures contain pointers to free object.
816 	 * However, when such objects are allocated or transferred to another
817 	 * cache the pointers are not cleared and they could be counted as
818 	 * valid references during a kmemleak scan. Therefore, kmemleak must
819 	 * not scan such objects.
820 	 */
821 	kmemleak_no_scan(nc);
822 	if (nc) {
823 		nc->avail = 0;
824 		nc->limit = entries;
825 		nc->batchcount = batchcount;
826 		nc->touched = 0;
827 		spin_lock_init(&nc->lock);
828 	}
829 	return nc;
830 }
831 
832 static inline bool is_slab_pfmemalloc(struct slab *slabp)
833 {
834 	struct page *page = virt_to_page(slabp->s_mem);
835 
836 	return PageSlabPfmemalloc(page);
837 }
838 
839 /* Clears pfmemalloc_active if no slabs have pfmalloc set */
840 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
841 						struct array_cache *ac)
842 {
843 	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
844 	struct slab *slabp;
845 	unsigned long flags;
846 
847 	if (!pfmemalloc_active)
848 		return;
849 
850 	spin_lock_irqsave(&n->list_lock, flags);
851 	list_for_each_entry(slabp, &n->slabs_full, list)
852 		if (is_slab_pfmemalloc(slabp))
853 			goto out;
854 
855 	list_for_each_entry(slabp, &n->slabs_partial, list)
856 		if (is_slab_pfmemalloc(slabp))
857 			goto out;
858 
859 	list_for_each_entry(slabp, &n->slabs_free, list)
860 		if (is_slab_pfmemalloc(slabp))
861 			goto out;
862 
863 	pfmemalloc_active = false;
864 out:
865 	spin_unlock_irqrestore(&n->list_lock, flags);
866 }
867 
868 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
869 						gfp_t flags, bool force_refill)
870 {
871 	int i;
872 	void *objp = ac->entry[--ac->avail];
873 
874 	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
875 	if (unlikely(is_obj_pfmemalloc(objp))) {
876 		struct kmem_cache_node *n;
877 
878 		if (gfp_pfmemalloc_allowed(flags)) {
879 			clear_obj_pfmemalloc(&objp);
880 			return objp;
881 		}
882 
883 		/* The caller cannot use PFMEMALLOC objects, find another one */
884 		for (i = 0; i < ac->avail; i++) {
885 			/* If a !PFMEMALLOC object is found, swap them */
886 			if (!is_obj_pfmemalloc(ac->entry[i])) {
887 				objp = ac->entry[i];
888 				ac->entry[i] = ac->entry[ac->avail];
889 				ac->entry[ac->avail] = objp;
890 				return objp;
891 			}
892 		}
893 
894 		/*
895 		 * If there are empty slabs on the slabs_free list and we are
896 		 * being forced to refill the cache, mark this one !pfmemalloc.
897 		 */
898 		n = cachep->node[numa_mem_id()];
899 		if (!list_empty(&n->slabs_free) && force_refill) {
900 			struct slab *slabp = virt_to_slab(objp);
901 			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
902 			clear_obj_pfmemalloc(&objp);
903 			recheck_pfmemalloc_active(cachep, ac);
904 			return objp;
905 		}
906 
907 		/* No !PFMEMALLOC objects available */
908 		ac->avail++;
909 		objp = NULL;
910 	}
911 
912 	return objp;
913 }
914 
915 static inline void *ac_get_obj(struct kmem_cache *cachep,
916 			struct array_cache *ac, gfp_t flags, bool force_refill)
917 {
918 	void *objp;
919 
920 	if (unlikely(sk_memalloc_socks()))
921 		objp = __ac_get_obj(cachep, ac, flags, force_refill);
922 	else
923 		objp = ac->entry[--ac->avail];
924 
925 	return objp;
926 }
927 
928 static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
929 								void *objp)
930 {
931 	if (unlikely(pfmemalloc_active)) {
932 		/* Some pfmemalloc slabs exist, check if this is one */
933 		struct page *page = virt_to_head_page(objp);
934 		if (PageSlabPfmemalloc(page))
935 			set_obj_pfmemalloc(&objp);
936 	}
937 
938 	return objp;
939 }
940 
941 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
942 								void *objp)
943 {
944 	if (unlikely(sk_memalloc_socks()))
945 		objp = __ac_put_obj(cachep, ac, objp);
946 
947 	ac->entry[ac->avail++] = objp;
948 }
949 
950 /*
951  * Transfer objects in one arraycache to another.
952  * Locking must be handled by the caller.
953  *
954  * Return the number of entries transferred.
955  */
956 static int transfer_objects(struct array_cache *to,
957 		struct array_cache *from, unsigned int max)
958 {
959 	/* Figure out how many entries to transfer */
960 	int nr = min3(from->avail, max, to->limit - to->avail);
961 
962 	if (!nr)
963 		return 0;
964 
965 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
966 			sizeof(void *) *nr);
967 
968 	from->avail -= nr;
969 	to->avail += nr;
970 	return nr;
971 }
972 
973 #ifndef CONFIG_NUMA
974 
975 #define drain_alien_cache(cachep, alien) do { } while (0)
976 #define reap_alien(cachep, n) do { } while (0)
977 
978 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
979 {
980 	return (struct array_cache **)BAD_ALIEN_MAGIC;
981 }
982 
983 static inline void free_alien_cache(struct array_cache **ac_ptr)
984 {
985 }
986 
987 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
988 {
989 	return 0;
990 }
991 
992 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
993 		gfp_t flags)
994 {
995 	return NULL;
996 }
997 
998 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
999 		 gfp_t flags, int nodeid)
1000 {
1001 	return NULL;
1002 }
1003 
1004 #else	/* CONFIG_NUMA */
1005 
1006 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1007 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1008 
1009 static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
1010 {
1011 	struct array_cache **ac_ptr;
1012 	int memsize = sizeof(void *) * nr_node_ids;
1013 	int i;
1014 
1015 	if (limit > 1)
1016 		limit = 12;
1017 	ac_ptr = kzalloc_node(memsize, gfp, node);
1018 	if (ac_ptr) {
1019 		for_each_node(i) {
1020 			if (i == node || !node_online(i))
1021 				continue;
1022 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
1023 			if (!ac_ptr[i]) {
1024 				for (i--; i >= 0; i--)
1025 					kfree(ac_ptr[i]);
1026 				kfree(ac_ptr);
1027 				return NULL;
1028 			}
1029 		}
1030 	}
1031 	return ac_ptr;
1032 }
1033 
1034 static void free_alien_cache(struct array_cache **ac_ptr)
1035 {
1036 	int i;
1037 
1038 	if (!ac_ptr)
1039 		return;
1040 	for_each_node(i)
1041 	    kfree(ac_ptr[i]);
1042 	kfree(ac_ptr);
1043 }
1044 
1045 static void __drain_alien_cache(struct kmem_cache *cachep,
1046 				struct array_cache *ac, int node)
1047 {
1048 	struct kmem_cache_node *n = cachep->node[node];
1049 
1050 	if (ac->avail) {
1051 		spin_lock(&n->list_lock);
1052 		/*
1053 		 * Stuff objects into the remote nodes shared array first.
1054 		 * That way we could avoid the overhead of putting the objects
1055 		 * into the free lists and getting them back later.
1056 		 */
1057 		if (n->shared)
1058 			transfer_objects(n->shared, ac, ac->limit);
1059 
1060 		free_block(cachep, ac->entry, ac->avail, node);
1061 		ac->avail = 0;
1062 		spin_unlock(&n->list_lock);
1063 	}
1064 }
1065 
1066 /*
1067  * Called from cache_reap() to regularly drain alien caches round robin.
1068  */
1069 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
1070 {
1071 	int node = __this_cpu_read(slab_reap_node);
1072 
1073 	if (n->alien) {
1074 		struct array_cache *ac = n->alien[node];
1075 
1076 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1077 			__drain_alien_cache(cachep, ac, node);
1078 			spin_unlock_irq(&ac->lock);
1079 		}
1080 	}
1081 }
1082 
1083 static void drain_alien_cache(struct kmem_cache *cachep,
1084 				struct array_cache **alien)
1085 {
1086 	int i = 0;
1087 	struct array_cache *ac;
1088 	unsigned long flags;
1089 
1090 	for_each_online_node(i) {
1091 		ac = alien[i];
1092 		if (ac) {
1093 			spin_lock_irqsave(&ac->lock, flags);
1094 			__drain_alien_cache(cachep, ac, i);
1095 			spin_unlock_irqrestore(&ac->lock, flags);
1096 		}
1097 	}
1098 }
1099 
1100 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1101 {
1102 	struct slab *slabp = virt_to_slab(objp);
1103 	int nodeid = slabp->nodeid;
1104 	struct kmem_cache_node *n;
1105 	struct array_cache *alien = NULL;
1106 	int node;
1107 
1108 	node = numa_mem_id();
1109 
1110 	/*
1111 	 * Make sure we are not freeing a object from another node to the array
1112 	 * cache on this cpu.
1113 	 */
1114 	if (likely(slabp->nodeid == node))
1115 		return 0;
1116 
1117 	n = cachep->node[node];
1118 	STATS_INC_NODEFREES(cachep);
1119 	if (n->alien && n->alien[nodeid]) {
1120 		alien = n->alien[nodeid];
1121 		spin_lock(&alien->lock);
1122 		if (unlikely(alien->avail == alien->limit)) {
1123 			STATS_INC_ACOVERFLOW(cachep);
1124 			__drain_alien_cache(cachep, alien, nodeid);
1125 		}
1126 		ac_put_obj(cachep, alien, objp);
1127 		spin_unlock(&alien->lock);
1128 	} else {
1129 		spin_lock(&(cachep->node[nodeid])->list_lock);
1130 		free_block(cachep, &objp, 1, nodeid);
1131 		spin_unlock(&(cachep->node[nodeid])->list_lock);
1132 	}
1133 	return 1;
1134 }
1135 #endif
1136 
1137 /*
1138  * Allocates and initializes node for a node on each slab cache, used for
1139  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
1140  * will be allocated off-node since memory is not yet online for the new node.
1141  * When hotplugging memory or a cpu, existing node are not replaced if
1142  * already in use.
1143  *
1144  * Must hold slab_mutex.
1145  */
1146 static int init_cache_node_node(int node)
1147 {
1148 	struct kmem_cache *cachep;
1149 	struct kmem_cache_node *n;
1150 	const int memsize = sizeof(struct kmem_cache_node);
1151 
1152 	list_for_each_entry(cachep, &slab_caches, list) {
1153 		/*
1154 		 * Set up the size64 kmemlist for cpu before we can
1155 		 * begin anything. Make sure some other cpu on this
1156 		 * node has not already allocated this
1157 		 */
1158 		if (!cachep->node[node]) {
1159 			n = kmalloc_node(memsize, GFP_KERNEL, node);
1160 			if (!n)
1161 				return -ENOMEM;
1162 			kmem_cache_node_init(n);
1163 			n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1164 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1165 
1166 			/*
1167 			 * The l3s don't come and go as CPUs come and
1168 			 * go.  slab_mutex is sufficient
1169 			 * protection here.
1170 			 */
1171 			cachep->node[node] = n;
1172 		}
1173 
1174 		spin_lock_irq(&cachep->node[node]->list_lock);
1175 		cachep->node[node]->free_limit =
1176 			(1 + nr_cpus_node(node)) *
1177 			cachep->batchcount + cachep->num;
1178 		spin_unlock_irq(&cachep->node[node]->list_lock);
1179 	}
1180 	return 0;
1181 }
1182 
1183 static void __cpuinit cpuup_canceled(long cpu)
1184 {
1185 	struct kmem_cache *cachep;
1186 	struct kmem_cache_node *n = NULL;
1187 	int node = cpu_to_mem(cpu);
1188 	const struct cpumask *mask = cpumask_of_node(node);
1189 
1190 	list_for_each_entry(cachep, &slab_caches, list) {
1191 		struct array_cache *nc;
1192 		struct array_cache *shared;
1193 		struct array_cache **alien;
1194 
1195 		/* cpu is dead; no one can alloc from it. */
1196 		nc = cachep->array[cpu];
1197 		cachep->array[cpu] = NULL;
1198 		n = cachep->node[node];
1199 
1200 		if (!n)
1201 			goto free_array_cache;
1202 
1203 		spin_lock_irq(&n->list_lock);
1204 
1205 		/* Free limit for this kmem_cache_node */
1206 		n->free_limit -= cachep->batchcount;
1207 		if (nc)
1208 			free_block(cachep, nc->entry, nc->avail, node);
1209 
1210 		if (!cpumask_empty(mask)) {
1211 			spin_unlock_irq(&n->list_lock);
1212 			goto free_array_cache;
1213 		}
1214 
1215 		shared = n->shared;
1216 		if (shared) {
1217 			free_block(cachep, shared->entry,
1218 				   shared->avail, node);
1219 			n->shared = NULL;
1220 		}
1221 
1222 		alien = n->alien;
1223 		n->alien = NULL;
1224 
1225 		spin_unlock_irq(&n->list_lock);
1226 
1227 		kfree(shared);
1228 		if (alien) {
1229 			drain_alien_cache(cachep, alien);
1230 			free_alien_cache(alien);
1231 		}
1232 free_array_cache:
1233 		kfree(nc);
1234 	}
1235 	/*
1236 	 * In the previous loop, all the objects were freed to
1237 	 * the respective cache's slabs,  now we can go ahead and
1238 	 * shrink each nodelist to its limit.
1239 	 */
1240 	list_for_each_entry(cachep, &slab_caches, list) {
1241 		n = cachep->node[node];
1242 		if (!n)
1243 			continue;
1244 		drain_freelist(cachep, n, n->free_objects);
1245 	}
1246 }
1247 
1248 static int __cpuinit cpuup_prepare(long cpu)
1249 {
1250 	struct kmem_cache *cachep;
1251 	struct kmem_cache_node *n = NULL;
1252 	int node = cpu_to_mem(cpu);
1253 	int err;
1254 
1255 	/*
1256 	 * We need to do this right in the beginning since
1257 	 * alloc_arraycache's are going to use this list.
1258 	 * kmalloc_node allows us to add the slab to the right
1259 	 * kmem_cache_node and not this cpu's kmem_cache_node
1260 	 */
1261 	err = init_cache_node_node(node);
1262 	if (err < 0)
1263 		goto bad;
1264 
1265 	/*
1266 	 * Now we can go ahead with allocating the shared arrays and
1267 	 * array caches
1268 	 */
1269 	list_for_each_entry(cachep, &slab_caches, list) {
1270 		struct array_cache *nc;
1271 		struct array_cache *shared = NULL;
1272 		struct array_cache **alien = NULL;
1273 
1274 		nc = alloc_arraycache(node, cachep->limit,
1275 					cachep->batchcount, GFP_KERNEL);
1276 		if (!nc)
1277 			goto bad;
1278 		if (cachep->shared) {
1279 			shared = alloc_arraycache(node,
1280 				cachep->shared * cachep->batchcount,
1281 				0xbaadf00d, GFP_KERNEL);
1282 			if (!shared) {
1283 				kfree(nc);
1284 				goto bad;
1285 			}
1286 		}
1287 		if (use_alien_caches) {
1288 			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1289 			if (!alien) {
1290 				kfree(shared);
1291 				kfree(nc);
1292 				goto bad;
1293 			}
1294 		}
1295 		cachep->array[cpu] = nc;
1296 		n = cachep->node[node];
1297 		BUG_ON(!n);
1298 
1299 		spin_lock_irq(&n->list_lock);
1300 		if (!n->shared) {
1301 			/*
1302 			 * We are serialised from CPU_DEAD or
1303 			 * CPU_UP_CANCELLED by the cpucontrol lock
1304 			 */
1305 			n->shared = shared;
1306 			shared = NULL;
1307 		}
1308 #ifdef CONFIG_NUMA
1309 		if (!n->alien) {
1310 			n->alien = alien;
1311 			alien = NULL;
1312 		}
1313 #endif
1314 		spin_unlock_irq(&n->list_lock);
1315 		kfree(shared);
1316 		free_alien_cache(alien);
1317 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
1318 			slab_set_debugobj_lock_classes_node(cachep, node);
1319 		else if (!OFF_SLAB(cachep) &&
1320 			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
1321 			on_slab_lock_classes_node(cachep, node);
1322 	}
1323 	init_node_lock_keys(node);
1324 
1325 	return 0;
1326 bad:
1327 	cpuup_canceled(cpu);
1328 	return -ENOMEM;
1329 }
1330 
1331 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1332 				    unsigned long action, void *hcpu)
1333 {
1334 	long cpu = (long)hcpu;
1335 	int err = 0;
1336 
1337 	switch (action) {
1338 	case CPU_UP_PREPARE:
1339 	case CPU_UP_PREPARE_FROZEN:
1340 		mutex_lock(&slab_mutex);
1341 		err = cpuup_prepare(cpu);
1342 		mutex_unlock(&slab_mutex);
1343 		break;
1344 	case CPU_ONLINE:
1345 	case CPU_ONLINE_FROZEN:
1346 		start_cpu_timer(cpu);
1347 		break;
1348 #ifdef CONFIG_HOTPLUG_CPU
1349   	case CPU_DOWN_PREPARE:
1350   	case CPU_DOWN_PREPARE_FROZEN:
1351 		/*
1352 		 * Shutdown cache reaper. Note that the slab_mutex is
1353 		 * held so that if cache_reap() is invoked it cannot do
1354 		 * anything expensive but will only modify reap_work
1355 		 * and reschedule the timer.
1356 		*/
1357 		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1358 		/* Now the cache_reaper is guaranteed to be not running. */
1359 		per_cpu(slab_reap_work, cpu).work.func = NULL;
1360   		break;
1361   	case CPU_DOWN_FAILED:
1362   	case CPU_DOWN_FAILED_FROZEN:
1363 		start_cpu_timer(cpu);
1364   		break;
1365 	case CPU_DEAD:
1366 	case CPU_DEAD_FROZEN:
1367 		/*
1368 		 * Even if all the cpus of a node are down, we don't free the
1369 		 * kmem_cache_node of any cache. This to avoid a race between
1370 		 * cpu_down, and a kmalloc allocation from another cpu for
1371 		 * memory from the node of the cpu going down.  The node
1372 		 * structure is usually allocated from kmem_cache_create() and
1373 		 * gets destroyed at kmem_cache_destroy().
1374 		 */
1375 		/* fall through */
1376 #endif
1377 	case CPU_UP_CANCELED:
1378 	case CPU_UP_CANCELED_FROZEN:
1379 		mutex_lock(&slab_mutex);
1380 		cpuup_canceled(cpu);
1381 		mutex_unlock(&slab_mutex);
1382 		break;
1383 	}
1384 	return notifier_from_errno(err);
1385 }
1386 
1387 static struct notifier_block __cpuinitdata cpucache_notifier = {
1388 	&cpuup_callback, NULL, 0
1389 };
1390 
1391 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1392 /*
1393  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1394  * Returns -EBUSY if all objects cannot be drained so that the node is not
1395  * removed.
1396  *
1397  * Must hold slab_mutex.
1398  */
1399 static int __meminit drain_cache_node_node(int node)
1400 {
1401 	struct kmem_cache *cachep;
1402 	int ret = 0;
1403 
1404 	list_for_each_entry(cachep, &slab_caches, list) {
1405 		struct kmem_cache_node *n;
1406 
1407 		n = cachep->node[node];
1408 		if (!n)
1409 			continue;
1410 
1411 		drain_freelist(cachep, n, n->free_objects);
1412 
1413 		if (!list_empty(&n->slabs_full) ||
1414 		    !list_empty(&n->slabs_partial)) {
1415 			ret = -EBUSY;
1416 			break;
1417 		}
1418 	}
1419 	return ret;
1420 }
1421 
1422 static int __meminit slab_memory_callback(struct notifier_block *self,
1423 					unsigned long action, void *arg)
1424 {
1425 	struct memory_notify *mnb = arg;
1426 	int ret = 0;
1427 	int nid;
1428 
1429 	nid = mnb->status_change_nid;
1430 	if (nid < 0)
1431 		goto out;
1432 
1433 	switch (action) {
1434 	case MEM_GOING_ONLINE:
1435 		mutex_lock(&slab_mutex);
1436 		ret = init_cache_node_node(nid);
1437 		mutex_unlock(&slab_mutex);
1438 		break;
1439 	case MEM_GOING_OFFLINE:
1440 		mutex_lock(&slab_mutex);
1441 		ret = drain_cache_node_node(nid);
1442 		mutex_unlock(&slab_mutex);
1443 		break;
1444 	case MEM_ONLINE:
1445 	case MEM_OFFLINE:
1446 	case MEM_CANCEL_ONLINE:
1447 	case MEM_CANCEL_OFFLINE:
1448 		break;
1449 	}
1450 out:
1451 	return notifier_from_errno(ret);
1452 }
1453 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1454 
1455 /*
1456  * swap the static kmem_cache_node with kmalloced memory
1457  */
1458 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1459 				int nodeid)
1460 {
1461 	struct kmem_cache_node *ptr;
1462 
1463 	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1464 	BUG_ON(!ptr);
1465 
1466 	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1467 	/*
1468 	 * Do not assume that spinlocks can be initialized via memcpy:
1469 	 */
1470 	spin_lock_init(&ptr->list_lock);
1471 
1472 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1473 	cachep->node[nodeid] = ptr;
1474 }
1475 
1476 /*
1477  * For setting up all the kmem_cache_node for cache whose buffer_size is same as
1478  * size of kmem_cache_node.
1479  */
1480 static void __init set_up_node(struct kmem_cache *cachep, int index)
1481 {
1482 	int node;
1483 
1484 	for_each_online_node(node) {
1485 		cachep->node[node] = &init_kmem_cache_node[index + node];
1486 		cachep->node[node]->next_reap = jiffies +
1487 		    REAPTIMEOUT_LIST3 +
1488 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1489 	}
1490 }
1491 
1492 /*
1493  * The memory after the last cpu cache pointer is used for the
1494  * the node pointer.
1495  */
1496 static void setup_node_pointer(struct kmem_cache *cachep)
1497 {
1498 	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
1499 }
1500 
1501 /*
1502  * Initialisation.  Called after the page allocator have been initialised and
1503  * before smp_init().
1504  */
1505 void __init kmem_cache_init(void)
1506 {
1507 	int i;
1508 
1509 	kmem_cache = &kmem_cache_boot;
1510 	setup_node_pointer(kmem_cache);
1511 
1512 	if (num_possible_nodes() == 1)
1513 		use_alien_caches = 0;
1514 
1515 	for (i = 0; i < NUM_INIT_LISTS; i++)
1516 		kmem_cache_node_init(&init_kmem_cache_node[i]);
1517 
1518 	set_up_node(kmem_cache, CACHE_CACHE);
1519 
1520 	/*
1521 	 * Fragmentation resistance on low memory - only use bigger
1522 	 * page orders on machines with more than 32MB of memory if
1523 	 * not overridden on the command line.
1524 	 */
1525 	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1526 		slab_max_order = SLAB_MAX_ORDER_HI;
1527 
1528 	/* Bootstrap is tricky, because several objects are allocated
1529 	 * from caches that do not exist yet:
1530 	 * 1) initialize the kmem_cache cache: it contains the struct
1531 	 *    kmem_cache structures of all caches, except kmem_cache itself:
1532 	 *    kmem_cache is statically allocated.
1533 	 *    Initially an __init data area is used for the head array and the
1534 	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1535 	 *    array at the end of the bootstrap.
1536 	 * 2) Create the first kmalloc cache.
1537 	 *    The struct kmem_cache for the new cache is allocated normally.
1538 	 *    An __init data area is used for the head array.
1539 	 * 3) Create the remaining kmalloc caches, with minimally sized
1540 	 *    head arrays.
1541 	 * 4) Replace the __init data head arrays for kmem_cache and the first
1542 	 *    kmalloc cache with kmalloc allocated arrays.
1543 	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1544 	 *    the other cache's with kmalloc allocated memory.
1545 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1546 	 */
1547 
1548 	/* 1) create the kmem_cache */
1549 
1550 	/*
1551 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1552 	 */
1553 	create_boot_cache(kmem_cache, "kmem_cache",
1554 		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1555 				  nr_node_ids * sizeof(struct kmem_cache_node *),
1556 				  SLAB_HWCACHE_ALIGN);
1557 	list_add(&kmem_cache->list, &slab_caches);
1558 
1559 	/* 2+3) create the kmalloc caches */
1560 
1561 	/*
1562 	 * Initialize the caches that provide memory for the array cache and the
1563 	 * kmem_cache_node structures first.  Without this, further allocations will
1564 	 * bug.
1565 	 */
1566 
1567 	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
1568 					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
1569 
1570 	if (INDEX_AC != INDEX_NODE)
1571 		kmalloc_caches[INDEX_NODE] =
1572 			create_kmalloc_cache("kmalloc-node",
1573 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1574 
1575 	slab_early_init = 0;
1576 
1577 	/* 4) Replace the bootstrap head arrays */
1578 	{
1579 		struct array_cache *ptr;
1580 
1581 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1582 
1583 		memcpy(ptr, cpu_cache_get(kmem_cache),
1584 		       sizeof(struct arraycache_init));
1585 		/*
1586 		 * Do not assume that spinlocks can be initialized via memcpy:
1587 		 */
1588 		spin_lock_init(&ptr->lock);
1589 
1590 		kmem_cache->array[smp_processor_id()] = ptr;
1591 
1592 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1593 
1594 		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
1595 		       != &initarray_generic.cache);
1596 		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
1597 		       sizeof(struct arraycache_init));
1598 		/*
1599 		 * Do not assume that spinlocks can be initialized via memcpy:
1600 		 */
1601 		spin_lock_init(&ptr->lock);
1602 
1603 		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
1604 	}
1605 	/* 5) Replace the bootstrap kmem_cache_node */
1606 	{
1607 		int nid;
1608 
1609 		for_each_online_node(nid) {
1610 			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1611 
1612 			init_list(kmalloc_caches[INDEX_AC],
1613 				  &init_kmem_cache_node[SIZE_AC + nid], nid);
1614 
1615 			if (INDEX_AC != INDEX_NODE) {
1616 				init_list(kmalloc_caches[INDEX_NODE],
1617 					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1618 			}
1619 		}
1620 	}
1621 
1622 	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1623 }
1624 
1625 void __init kmem_cache_init_late(void)
1626 {
1627 	struct kmem_cache *cachep;
1628 
1629 	slab_state = UP;
1630 
1631 	/* 6) resize the head arrays to their final sizes */
1632 	mutex_lock(&slab_mutex);
1633 	list_for_each_entry(cachep, &slab_caches, list)
1634 		if (enable_cpucache(cachep, GFP_NOWAIT))
1635 			BUG();
1636 	mutex_unlock(&slab_mutex);
1637 
1638 	/* Annotate slab for lockdep -- annotate the malloc caches */
1639 	init_lock_keys();
1640 
1641 	/* Done! */
1642 	slab_state = FULL;
1643 
1644 	/*
1645 	 * Register a cpu startup notifier callback that initializes
1646 	 * cpu_cache_get for all new cpus
1647 	 */
1648 	register_cpu_notifier(&cpucache_notifier);
1649 
1650 #ifdef CONFIG_NUMA
1651 	/*
1652 	 * Register a memory hotplug callback that initializes and frees
1653 	 * node.
1654 	 */
1655 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1656 #endif
1657 
1658 	/*
1659 	 * The reap timers are started later, with a module init call: That part
1660 	 * of the kernel is not yet operational.
1661 	 */
1662 }
1663 
1664 static int __init cpucache_init(void)
1665 {
1666 	int cpu;
1667 
1668 	/*
1669 	 * Register the timers that return unneeded pages to the page allocator
1670 	 */
1671 	for_each_online_cpu(cpu)
1672 		start_cpu_timer(cpu);
1673 
1674 	/* Done! */
1675 	slab_state = FULL;
1676 	return 0;
1677 }
1678 __initcall(cpucache_init);
1679 
1680 static noinline void
1681 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1682 {
1683 	struct kmem_cache_node *n;
1684 	struct slab *slabp;
1685 	unsigned long flags;
1686 	int node;
1687 
1688 	printk(KERN_WARNING
1689 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1690 		nodeid, gfpflags);
1691 	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1692 		cachep->name, cachep->size, cachep->gfporder);
1693 
1694 	for_each_online_node(node) {
1695 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1696 		unsigned long active_slabs = 0, num_slabs = 0;
1697 
1698 		n = cachep->node[node];
1699 		if (!n)
1700 			continue;
1701 
1702 		spin_lock_irqsave(&n->list_lock, flags);
1703 		list_for_each_entry(slabp, &n->slabs_full, list) {
1704 			active_objs += cachep->num;
1705 			active_slabs++;
1706 		}
1707 		list_for_each_entry(slabp, &n->slabs_partial, list) {
1708 			active_objs += slabp->inuse;
1709 			active_slabs++;
1710 		}
1711 		list_for_each_entry(slabp, &n->slabs_free, list)
1712 			num_slabs++;
1713 
1714 		free_objects += n->free_objects;
1715 		spin_unlock_irqrestore(&n->list_lock, flags);
1716 
1717 		num_slabs += active_slabs;
1718 		num_objs = num_slabs * cachep->num;
1719 		printk(KERN_WARNING
1720 			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1721 			node, active_slabs, num_slabs, active_objs, num_objs,
1722 			free_objects);
1723 	}
1724 }
1725 
1726 /*
1727  * Interface to system's page allocator. No need to hold the cache-lock.
1728  *
1729  * If we requested dmaable memory, we will get it. Even if we
1730  * did not request dmaable memory, we might get it, but that
1731  * would be relatively rare and ignorable.
1732  */
1733 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1734 {
1735 	struct page *page;
1736 	int nr_pages;
1737 	int i;
1738 
1739 #ifndef CONFIG_MMU
1740 	/*
1741 	 * Nommu uses slab's for process anonymous memory allocations, and thus
1742 	 * requires __GFP_COMP to properly refcount higher order allocations
1743 	 */
1744 	flags |= __GFP_COMP;
1745 #endif
1746 
1747 	flags |= cachep->allocflags;
1748 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1749 		flags |= __GFP_RECLAIMABLE;
1750 
1751 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1752 	if (!page) {
1753 		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1754 			slab_out_of_memory(cachep, flags, nodeid);
1755 		return NULL;
1756 	}
1757 
1758 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1759 	if (unlikely(page->pfmemalloc))
1760 		pfmemalloc_active = true;
1761 
1762 	nr_pages = (1 << cachep->gfporder);
1763 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1764 		add_zone_page_state(page_zone(page),
1765 			NR_SLAB_RECLAIMABLE, nr_pages);
1766 	else
1767 		add_zone_page_state(page_zone(page),
1768 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1769 	for (i = 0; i < nr_pages; i++) {
1770 		__SetPageSlab(page + i);
1771 
1772 		if (page->pfmemalloc)
1773 			SetPageSlabPfmemalloc(page + i);
1774 	}
1775 	memcg_bind_pages(cachep, cachep->gfporder);
1776 
1777 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1778 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1779 
1780 		if (cachep->ctor)
1781 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1782 		else
1783 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1784 	}
1785 
1786 	return page_address(page);
1787 }
1788 
1789 /*
1790  * Interface to system's page release.
1791  */
1792 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1793 {
1794 	unsigned long i = (1 << cachep->gfporder);
1795 	struct page *page = virt_to_page(addr);
1796 	const unsigned long nr_freed = i;
1797 
1798 	kmemcheck_free_shadow(page, cachep->gfporder);
1799 
1800 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1801 		sub_zone_page_state(page_zone(page),
1802 				NR_SLAB_RECLAIMABLE, nr_freed);
1803 	else
1804 		sub_zone_page_state(page_zone(page),
1805 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1806 	while (i--) {
1807 		BUG_ON(!PageSlab(page));
1808 		__ClearPageSlabPfmemalloc(page);
1809 		__ClearPageSlab(page);
1810 		page++;
1811 	}
1812 
1813 	memcg_release_pages(cachep, cachep->gfporder);
1814 	if (current->reclaim_state)
1815 		current->reclaim_state->reclaimed_slab += nr_freed;
1816 	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
1817 }
1818 
1819 static void kmem_rcu_free(struct rcu_head *head)
1820 {
1821 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1822 	struct kmem_cache *cachep = slab_rcu->cachep;
1823 
1824 	kmem_freepages(cachep, slab_rcu->addr);
1825 	if (OFF_SLAB(cachep))
1826 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1827 }
1828 
1829 #if DEBUG
1830 
1831 #ifdef CONFIG_DEBUG_PAGEALLOC
1832 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1833 			    unsigned long caller)
1834 {
1835 	int size = cachep->object_size;
1836 
1837 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1838 
1839 	if (size < 5 * sizeof(unsigned long))
1840 		return;
1841 
1842 	*addr++ = 0x12345678;
1843 	*addr++ = caller;
1844 	*addr++ = smp_processor_id();
1845 	size -= 3 * sizeof(unsigned long);
1846 	{
1847 		unsigned long *sptr = &caller;
1848 		unsigned long svalue;
1849 
1850 		while (!kstack_end(sptr)) {
1851 			svalue = *sptr++;
1852 			if (kernel_text_address(svalue)) {
1853 				*addr++ = svalue;
1854 				size -= sizeof(unsigned long);
1855 				if (size <= sizeof(unsigned long))
1856 					break;
1857 			}
1858 		}
1859 
1860 	}
1861 	*addr++ = 0x87654321;
1862 }
1863 #endif
1864 
1865 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1866 {
1867 	int size = cachep->object_size;
1868 	addr = &((char *)addr)[obj_offset(cachep)];
1869 
1870 	memset(addr, val, size);
1871 	*(unsigned char *)(addr + size - 1) = POISON_END;
1872 }
1873 
1874 static void dump_line(char *data, int offset, int limit)
1875 {
1876 	int i;
1877 	unsigned char error = 0;
1878 	int bad_count = 0;
1879 
1880 	printk(KERN_ERR "%03x: ", offset);
1881 	for (i = 0; i < limit; i++) {
1882 		if (data[offset + i] != POISON_FREE) {
1883 			error = data[offset + i];
1884 			bad_count++;
1885 		}
1886 	}
1887 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1888 			&data[offset], limit, 1);
1889 
1890 	if (bad_count == 1) {
1891 		error ^= POISON_FREE;
1892 		if (!(error & (error - 1))) {
1893 			printk(KERN_ERR "Single bit error detected. Probably "
1894 					"bad RAM.\n");
1895 #ifdef CONFIG_X86
1896 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1897 					"test tool.\n");
1898 #else
1899 			printk(KERN_ERR "Run a memory test tool.\n");
1900 #endif
1901 		}
1902 	}
1903 }
1904 #endif
1905 
1906 #if DEBUG
1907 
1908 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1909 {
1910 	int i, size;
1911 	char *realobj;
1912 
1913 	if (cachep->flags & SLAB_RED_ZONE) {
1914 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1915 			*dbg_redzone1(cachep, objp),
1916 			*dbg_redzone2(cachep, objp));
1917 	}
1918 
1919 	if (cachep->flags & SLAB_STORE_USER) {
1920 		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1921 		       *dbg_userword(cachep, objp),
1922 		       *dbg_userword(cachep, objp));
1923 	}
1924 	realobj = (char *)objp + obj_offset(cachep);
1925 	size = cachep->object_size;
1926 	for (i = 0; i < size && lines; i += 16, lines--) {
1927 		int limit;
1928 		limit = 16;
1929 		if (i + limit > size)
1930 			limit = size - i;
1931 		dump_line(realobj, i, limit);
1932 	}
1933 }
1934 
1935 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1936 {
1937 	char *realobj;
1938 	int size, i;
1939 	int lines = 0;
1940 
1941 	realobj = (char *)objp + obj_offset(cachep);
1942 	size = cachep->object_size;
1943 
1944 	for (i = 0; i < size; i++) {
1945 		char exp = POISON_FREE;
1946 		if (i == size - 1)
1947 			exp = POISON_END;
1948 		if (realobj[i] != exp) {
1949 			int limit;
1950 			/* Mismatch ! */
1951 			/* Print header */
1952 			if (lines == 0) {
1953 				printk(KERN_ERR
1954 					"Slab corruption (%s): %s start=%p, len=%d\n",
1955 					print_tainted(), cachep->name, realobj, size);
1956 				print_objinfo(cachep, objp, 0);
1957 			}
1958 			/* Hexdump the affected line */
1959 			i = (i / 16) * 16;
1960 			limit = 16;
1961 			if (i + limit > size)
1962 				limit = size - i;
1963 			dump_line(realobj, i, limit);
1964 			i += 16;
1965 			lines++;
1966 			/* Limit to 5 lines */
1967 			if (lines > 5)
1968 				break;
1969 		}
1970 	}
1971 	if (lines != 0) {
1972 		/* Print some data about the neighboring objects, if they
1973 		 * exist:
1974 		 */
1975 		struct slab *slabp = virt_to_slab(objp);
1976 		unsigned int objnr;
1977 
1978 		objnr = obj_to_index(cachep, slabp, objp);
1979 		if (objnr) {
1980 			objp = index_to_obj(cachep, slabp, objnr - 1);
1981 			realobj = (char *)objp + obj_offset(cachep);
1982 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1983 			       realobj, size);
1984 			print_objinfo(cachep, objp, 2);
1985 		}
1986 		if (objnr + 1 < cachep->num) {
1987 			objp = index_to_obj(cachep, slabp, objnr + 1);
1988 			realobj = (char *)objp + obj_offset(cachep);
1989 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1990 			       realobj, size);
1991 			print_objinfo(cachep, objp, 2);
1992 		}
1993 	}
1994 }
1995 #endif
1996 
1997 #if DEBUG
1998 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1999 {
2000 	int i;
2001 	for (i = 0; i < cachep->num; i++) {
2002 		void *objp = index_to_obj(cachep, slabp, i);
2003 
2004 		if (cachep->flags & SLAB_POISON) {
2005 #ifdef CONFIG_DEBUG_PAGEALLOC
2006 			if (cachep->size % PAGE_SIZE == 0 &&
2007 					OFF_SLAB(cachep))
2008 				kernel_map_pages(virt_to_page(objp),
2009 					cachep->size / PAGE_SIZE, 1);
2010 			else
2011 				check_poison_obj(cachep, objp);
2012 #else
2013 			check_poison_obj(cachep, objp);
2014 #endif
2015 		}
2016 		if (cachep->flags & SLAB_RED_ZONE) {
2017 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2018 				slab_error(cachep, "start of a freed object "
2019 					   "was overwritten");
2020 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2021 				slab_error(cachep, "end of a freed object "
2022 					   "was overwritten");
2023 		}
2024 	}
2025 }
2026 #else
2027 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2028 {
2029 }
2030 #endif
2031 
2032 /**
2033  * slab_destroy - destroy and release all objects in a slab
2034  * @cachep: cache pointer being destroyed
2035  * @slabp: slab pointer being destroyed
2036  *
2037  * Destroy all the objs in a slab, and release the mem back to the system.
2038  * Before calling the slab must have been unlinked from the cache.  The
2039  * cache-lock is not held/needed.
2040  */
2041 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2042 {
2043 	void *addr = slabp->s_mem - slabp->colouroff;
2044 
2045 	slab_destroy_debugcheck(cachep, slabp);
2046 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2047 		struct slab_rcu *slab_rcu;
2048 
2049 		slab_rcu = (struct slab_rcu *)slabp;
2050 		slab_rcu->cachep = cachep;
2051 		slab_rcu->addr = addr;
2052 		call_rcu(&slab_rcu->head, kmem_rcu_free);
2053 	} else {
2054 		kmem_freepages(cachep, addr);
2055 		if (OFF_SLAB(cachep))
2056 			kmem_cache_free(cachep->slabp_cache, slabp);
2057 	}
2058 }
2059 
2060 /**
2061  * calculate_slab_order - calculate size (page order) of slabs
2062  * @cachep: pointer to the cache that is being created
2063  * @size: size of objects to be created in this cache.
2064  * @align: required alignment for the objects.
2065  * @flags: slab allocation flags
2066  *
2067  * Also calculates the number of objects per slab.
2068  *
2069  * This could be made much more intelligent.  For now, try to avoid using
2070  * high order pages for slabs.  When the gfp() functions are more friendly
2071  * towards high-order requests, this should be changed.
2072  */
2073 static size_t calculate_slab_order(struct kmem_cache *cachep,
2074 			size_t size, size_t align, unsigned long flags)
2075 {
2076 	unsigned long offslab_limit;
2077 	size_t left_over = 0;
2078 	int gfporder;
2079 
2080 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2081 		unsigned int num;
2082 		size_t remainder;
2083 
2084 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2085 		if (!num)
2086 			continue;
2087 
2088 		if (flags & CFLGS_OFF_SLAB) {
2089 			/*
2090 			 * Max number of objs-per-slab for caches which
2091 			 * use off-slab slabs. Needed to avoid a possible
2092 			 * looping condition in cache_grow().
2093 			 */
2094 			offslab_limit = size - sizeof(struct slab);
2095 			offslab_limit /= sizeof(kmem_bufctl_t);
2096 
2097  			if (num > offslab_limit)
2098 				break;
2099 		}
2100 
2101 		/* Found something acceptable - save it away */
2102 		cachep->num = num;
2103 		cachep->gfporder = gfporder;
2104 		left_over = remainder;
2105 
2106 		/*
2107 		 * A VFS-reclaimable slab tends to have most allocations
2108 		 * as GFP_NOFS and we really don't want to have to be allocating
2109 		 * higher-order pages when we are unable to shrink dcache.
2110 		 */
2111 		if (flags & SLAB_RECLAIM_ACCOUNT)
2112 			break;
2113 
2114 		/*
2115 		 * Large number of objects is good, but very large slabs are
2116 		 * currently bad for the gfp()s.
2117 		 */
2118 		if (gfporder >= slab_max_order)
2119 			break;
2120 
2121 		/*
2122 		 * Acceptable internal fragmentation?
2123 		 */
2124 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2125 			break;
2126 	}
2127 	return left_over;
2128 }
2129 
2130 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2131 {
2132 	if (slab_state >= FULL)
2133 		return enable_cpucache(cachep, gfp);
2134 
2135 	if (slab_state == DOWN) {
2136 		/*
2137 		 * Note: Creation of first cache (kmem_cache).
2138 		 * The setup_node is taken care
2139 		 * of by the caller of __kmem_cache_create
2140 		 */
2141 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2142 		slab_state = PARTIAL;
2143 	} else if (slab_state == PARTIAL) {
2144 		/*
2145 		 * Note: the second kmem_cache_create must create the cache
2146 		 * that's used by kmalloc(24), otherwise the creation of
2147 		 * further caches will BUG().
2148 		 */
2149 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2150 
2151 		/*
2152 		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
2153 		 * the second cache, then we need to set up all its node/,
2154 		 * otherwise the creation of further caches will BUG().
2155 		 */
2156 		set_up_node(cachep, SIZE_AC);
2157 		if (INDEX_AC == INDEX_NODE)
2158 			slab_state = PARTIAL_NODE;
2159 		else
2160 			slab_state = PARTIAL_ARRAYCACHE;
2161 	} else {
2162 		/* Remaining boot caches */
2163 		cachep->array[smp_processor_id()] =
2164 			kmalloc(sizeof(struct arraycache_init), gfp);
2165 
2166 		if (slab_state == PARTIAL_ARRAYCACHE) {
2167 			set_up_node(cachep, SIZE_NODE);
2168 			slab_state = PARTIAL_NODE;
2169 		} else {
2170 			int node;
2171 			for_each_online_node(node) {
2172 				cachep->node[node] =
2173 				    kmalloc_node(sizeof(struct kmem_cache_node),
2174 						gfp, node);
2175 				BUG_ON(!cachep->node[node]);
2176 				kmem_cache_node_init(cachep->node[node]);
2177 			}
2178 		}
2179 	}
2180 	cachep->node[numa_mem_id()]->next_reap =
2181 			jiffies + REAPTIMEOUT_LIST3 +
2182 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2183 
2184 	cpu_cache_get(cachep)->avail = 0;
2185 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2186 	cpu_cache_get(cachep)->batchcount = 1;
2187 	cpu_cache_get(cachep)->touched = 0;
2188 	cachep->batchcount = 1;
2189 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2190 	return 0;
2191 }
2192 
2193 /**
2194  * __kmem_cache_create - Create a cache.
2195  * @cachep: cache management descriptor
2196  * @flags: SLAB flags
2197  *
2198  * Returns a ptr to the cache on success, NULL on failure.
2199  * Cannot be called within a int, but can be interrupted.
2200  * The @ctor is run when new pages are allocated by the cache.
2201  *
2202  * The flags are
2203  *
2204  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2205  * to catch references to uninitialised memory.
2206  *
2207  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2208  * for buffer overruns.
2209  *
2210  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2211  * cacheline.  This can be beneficial if you're counting cycles as closely
2212  * as davem.
2213  */
2214 int
2215 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2216 {
2217 	size_t left_over, slab_size, ralign;
2218 	gfp_t gfp;
2219 	int err;
2220 	size_t size = cachep->size;
2221 
2222 #if DEBUG
2223 #if FORCED_DEBUG
2224 	/*
2225 	 * Enable redzoning and last user accounting, except for caches with
2226 	 * large objects, if the increased size would increase the object size
2227 	 * above the next power of two: caches with object sizes just above a
2228 	 * power of two have a significant amount of internal fragmentation.
2229 	 */
2230 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2231 						2 * sizeof(unsigned long long)))
2232 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2233 	if (!(flags & SLAB_DESTROY_BY_RCU))
2234 		flags |= SLAB_POISON;
2235 #endif
2236 	if (flags & SLAB_DESTROY_BY_RCU)
2237 		BUG_ON(flags & SLAB_POISON);
2238 #endif
2239 
2240 	/*
2241 	 * Check that size is in terms of words.  This is needed to avoid
2242 	 * unaligned accesses for some archs when redzoning is used, and makes
2243 	 * sure any on-slab bufctl's are also correctly aligned.
2244 	 */
2245 	if (size & (BYTES_PER_WORD - 1)) {
2246 		size += (BYTES_PER_WORD - 1);
2247 		size &= ~(BYTES_PER_WORD - 1);
2248 	}
2249 
2250 	/*
2251 	 * Redzoning and user store require word alignment or possibly larger.
2252 	 * Note this will be overridden by architecture or caller mandated
2253 	 * alignment if either is greater than BYTES_PER_WORD.
2254 	 */
2255 	if (flags & SLAB_STORE_USER)
2256 		ralign = BYTES_PER_WORD;
2257 
2258 	if (flags & SLAB_RED_ZONE) {
2259 		ralign = REDZONE_ALIGN;
2260 		/* If redzoning, ensure that the second redzone is suitably
2261 		 * aligned, by adjusting the object size accordingly. */
2262 		size += REDZONE_ALIGN - 1;
2263 		size &= ~(REDZONE_ALIGN - 1);
2264 	}
2265 
2266 	/* 3) caller mandated alignment */
2267 	if (ralign < cachep->align) {
2268 		ralign = cachep->align;
2269 	}
2270 	/* disable debug if necessary */
2271 	if (ralign > __alignof__(unsigned long long))
2272 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2273 	/*
2274 	 * 4) Store it.
2275 	 */
2276 	cachep->align = ralign;
2277 
2278 	if (slab_is_available())
2279 		gfp = GFP_KERNEL;
2280 	else
2281 		gfp = GFP_NOWAIT;
2282 
2283 	setup_node_pointer(cachep);
2284 #if DEBUG
2285 
2286 	/*
2287 	 * Both debugging options require word-alignment which is calculated
2288 	 * into align above.
2289 	 */
2290 	if (flags & SLAB_RED_ZONE) {
2291 		/* add space for red zone words */
2292 		cachep->obj_offset += sizeof(unsigned long long);
2293 		size += 2 * sizeof(unsigned long long);
2294 	}
2295 	if (flags & SLAB_STORE_USER) {
2296 		/* user store requires one word storage behind the end of
2297 		 * the real object. But if the second red zone needs to be
2298 		 * aligned to 64 bits, we must allow that much space.
2299 		 */
2300 		if (flags & SLAB_RED_ZONE)
2301 			size += REDZONE_ALIGN;
2302 		else
2303 			size += BYTES_PER_WORD;
2304 	}
2305 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2306 	if (size >= kmalloc_size(INDEX_NODE + 1)
2307 	    && cachep->object_size > cache_line_size()
2308 	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
2309 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2310 		size = PAGE_SIZE;
2311 	}
2312 #endif
2313 #endif
2314 
2315 	/*
2316 	 * Determine if the slab management is 'on' or 'off' slab.
2317 	 * (bootstrapping cannot cope with offslab caches so don't do
2318 	 * it too early on. Always use on-slab management when
2319 	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2320 	 */
2321 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2322 	    !(flags & SLAB_NOLEAKTRACE))
2323 		/*
2324 		 * Size is large, assume best to place the slab management obj
2325 		 * off-slab (should allow better packing of objs).
2326 		 */
2327 		flags |= CFLGS_OFF_SLAB;
2328 
2329 	size = ALIGN(size, cachep->align);
2330 
2331 	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2332 
2333 	if (!cachep->num)
2334 		return -E2BIG;
2335 
2336 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2337 			  + sizeof(struct slab), cachep->align);
2338 
2339 	/*
2340 	 * If the slab has been placed off-slab, and we have enough space then
2341 	 * move it on-slab. This is at the expense of any extra colouring.
2342 	 */
2343 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2344 		flags &= ~CFLGS_OFF_SLAB;
2345 		left_over -= slab_size;
2346 	}
2347 
2348 	if (flags & CFLGS_OFF_SLAB) {
2349 		/* really off slab. No need for manual alignment */
2350 		slab_size =
2351 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2352 
2353 #ifdef CONFIG_PAGE_POISONING
2354 		/* If we're going to use the generic kernel_map_pages()
2355 		 * poisoning, then it's going to smash the contents of
2356 		 * the redzone and userword anyhow, so switch them off.
2357 		 */
2358 		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2359 			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2360 #endif
2361 	}
2362 
2363 	cachep->colour_off = cache_line_size();
2364 	/* Offset must be a multiple of the alignment. */
2365 	if (cachep->colour_off < cachep->align)
2366 		cachep->colour_off = cachep->align;
2367 	cachep->colour = left_over / cachep->colour_off;
2368 	cachep->slab_size = slab_size;
2369 	cachep->flags = flags;
2370 	cachep->allocflags = 0;
2371 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2372 		cachep->allocflags |= GFP_DMA;
2373 	cachep->size = size;
2374 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2375 
2376 	if (flags & CFLGS_OFF_SLAB) {
2377 		cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
2378 		/*
2379 		 * This is a possibility for one of the malloc_sizes caches.
2380 		 * But since we go off slab only for object size greater than
2381 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2382 		 * this should not happen at all.
2383 		 * But leave a BUG_ON for some lucky dude.
2384 		 */
2385 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2386 	}
2387 
2388 	err = setup_cpu_cache(cachep, gfp);
2389 	if (err) {
2390 		__kmem_cache_shutdown(cachep);
2391 		return err;
2392 	}
2393 
2394 	if (flags & SLAB_DEBUG_OBJECTS) {
2395 		/*
2396 		 * Would deadlock through slab_destroy()->call_rcu()->
2397 		 * debug_object_activate()->kmem_cache_alloc().
2398 		 */
2399 		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2400 
2401 		slab_set_debugobj_lock_classes(cachep);
2402 	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2403 		on_slab_lock_classes(cachep);
2404 
2405 	return 0;
2406 }
2407 
2408 #if DEBUG
2409 static void check_irq_off(void)
2410 {
2411 	BUG_ON(!irqs_disabled());
2412 }
2413 
2414 static void check_irq_on(void)
2415 {
2416 	BUG_ON(irqs_disabled());
2417 }
2418 
2419 static void check_spinlock_acquired(struct kmem_cache *cachep)
2420 {
2421 #ifdef CONFIG_SMP
2422 	check_irq_off();
2423 	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
2424 #endif
2425 }
2426 
2427 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2428 {
2429 #ifdef CONFIG_SMP
2430 	check_irq_off();
2431 	assert_spin_locked(&cachep->node[node]->list_lock);
2432 #endif
2433 }
2434 
2435 #else
2436 #define check_irq_off()	do { } while(0)
2437 #define check_irq_on()	do { } while(0)
2438 #define check_spinlock_acquired(x) do { } while(0)
2439 #define check_spinlock_acquired_node(x, y) do { } while(0)
2440 #endif
2441 
2442 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2443 			struct array_cache *ac,
2444 			int force, int node);
2445 
2446 static void do_drain(void *arg)
2447 {
2448 	struct kmem_cache *cachep = arg;
2449 	struct array_cache *ac;
2450 	int node = numa_mem_id();
2451 
2452 	check_irq_off();
2453 	ac = cpu_cache_get(cachep);
2454 	spin_lock(&cachep->node[node]->list_lock);
2455 	free_block(cachep, ac->entry, ac->avail, node);
2456 	spin_unlock(&cachep->node[node]->list_lock);
2457 	ac->avail = 0;
2458 }
2459 
2460 static void drain_cpu_caches(struct kmem_cache *cachep)
2461 {
2462 	struct kmem_cache_node *n;
2463 	int node;
2464 
2465 	on_each_cpu(do_drain, cachep, 1);
2466 	check_irq_on();
2467 	for_each_online_node(node) {
2468 		n = cachep->node[node];
2469 		if (n && n->alien)
2470 			drain_alien_cache(cachep, n->alien);
2471 	}
2472 
2473 	for_each_online_node(node) {
2474 		n = cachep->node[node];
2475 		if (n)
2476 			drain_array(cachep, n, n->shared, 1, node);
2477 	}
2478 }
2479 
2480 /*
2481  * Remove slabs from the list of free slabs.
2482  * Specify the number of slabs to drain in tofree.
2483  *
2484  * Returns the actual number of slabs released.
2485  */
2486 static int drain_freelist(struct kmem_cache *cache,
2487 			struct kmem_cache_node *n, int tofree)
2488 {
2489 	struct list_head *p;
2490 	int nr_freed;
2491 	struct slab *slabp;
2492 
2493 	nr_freed = 0;
2494 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2495 
2496 		spin_lock_irq(&n->list_lock);
2497 		p = n->slabs_free.prev;
2498 		if (p == &n->slabs_free) {
2499 			spin_unlock_irq(&n->list_lock);
2500 			goto out;
2501 		}
2502 
2503 		slabp = list_entry(p, struct slab, list);
2504 #if DEBUG
2505 		BUG_ON(slabp->inuse);
2506 #endif
2507 		list_del(&slabp->list);
2508 		/*
2509 		 * Safe to drop the lock. The slab is no longer linked
2510 		 * to the cache.
2511 		 */
2512 		n->free_objects -= cache->num;
2513 		spin_unlock_irq(&n->list_lock);
2514 		slab_destroy(cache, slabp);
2515 		nr_freed++;
2516 	}
2517 out:
2518 	return nr_freed;
2519 }
2520 
2521 /* Called with slab_mutex held to protect against cpu hotplug */
2522 static int __cache_shrink(struct kmem_cache *cachep)
2523 {
2524 	int ret = 0, i = 0;
2525 	struct kmem_cache_node *n;
2526 
2527 	drain_cpu_caches(cachep);
2528 
2529 	check_irq_on();
2530 	for_each_online_node(i) {
2531 		n = cachep->node[i];
2532 		if (!n)
2533 			continue;
2534 
2535 		drain_freelist(cachep, n, n->free_objects);
2536 
2537 		ret += !list_empty(&n->slabs_full) ||
2538 			!list_empty(&n->slabs_partial);
2539 	}
2540 	return (ret ? 1 : 0);
2541 }
2542 
2543 /**
2544  * kmem_cache_shrink - Shrink a cache.
2545  * @cachep: The cache to shrink.
2546  *
2547  * Releases as many slabs as possible for a cache.
2548  * To help debugging, a zero exit status indicates all slabs were released.
2549  */
2550 int kmem_cache_shrink(struct kmem_cache *cachep)
2551 {
2552 	int ret;
2553 	BUG_ON(!cachep || in_interrupt());
2554 
2555 	get_online_cpus();
2556 	mutex_lock(&slab_mutex);
2557 	ret = __cache_shrink(cachep);
2558 	mutex_unlock(&slab_mutex);
2559 	put_online_cpus();
2560 	return ret;
2561 }
2562 EXPORT_SYMBOL(kmem_cache_shrink);
2563 
2564 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2565 {
2566 	int i;
2567 	struct kmem_cache_node *n;
2568 	int rc = __cache_shrink(cachep);
2569 
2570 	if (rc)
2571 		return rc;
2572 
2573 	for_each_online_cpu(i)
2574 	    kfree(cachep->array[i]);
2575 
2576 	/* NUMA: free the node structures */
2577 	for_each_online_node(i) {
2578 		n = cachep->node[i];
2579 		if (n) {
2580 			kfree(n->shared);
2581 			free_alien_cache(n->alien);
2582 			kfree(n);
2583 		}
2584 	}
2585 	return 0;
2586 }
2587 
2588 /*
2589  * Get the memory for a slab management obj.
2590  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2591  * always come from malloc_sizes caches.  The slab descriptor cannot
2592  * come from the same cache which is getting created because,
2593  * when we are searching for an appropriate cache for these
2594  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2595  * If we are creating a malloc_sizes cache here it would not be visible to
2596  * kmem_find_general_cachep till the initialization is complete.
2597  * Hence we cannot have slabp_cache same as the original cache.
2598  */
2599 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2600 				   int colour_off, gfp_t local_flags,
2601 				   int nodeid)
2602 {
2603 	struct slab *slabp;
2604 
2605 	if (OFF_SLAB(cachep)) {
2606 		/* Slab management obj is off-slab. */
2607 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2608 					      local_flags, nodeid);
2609 		/*
2610 		 * If the first object in the slab is leaked (it's allocated
2611 		 * but no one has a reference to it), we want to make sure
2612 		 * kmemleak does not treat the ->s_mem pointer as a reference
2613 		 * to the object. Otherwise we will not report the leak.
2614 		 */
2615 		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2616 				   local_flags);
2617 		if (!slabp)
2618 			return NULL;
2619 	} else {
2620 		slabp = objp + colour_off;
2621 		colour_off += cachep->slab_size;
2622 	}
2623 	slabp->inuse = 0;
2624 	slabp->colouroff = colour_off;
2625 	slabp->s_mem = objp + colour_off;
2626 	slabp->nodeid = nodeid;
2627 	slabp->free = 0;
2628 	return slabp;
2629 }
2630 
2631 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2632 {
2633 	return (kmem_bufctl_t *) (slabp + 1);
2634 }
2635 
2636 static void cache_init_objs(struct kmem_cache *cachep,
2637 			    struct slab *slabp)
2638 {
2639 	int i;
2640 
2641 	for (i = 0; i < cachep->num; i++) {
2642 		void *objp = index_to_obj(cachep, slabp, i);
2643 #if DEBUG
2644 		/* need to poison the objs? */
2645 		if (cachep->flags & SLAB_POISON)
2646 			poison_obj(cachep, objp, POISON_FREE);
2647 		if (cachep->flags & SLAB_STORE_USER)
2648 			*dbg_userword(cachep, objp) = NULL;
2649 
2650 		if (cachep->flags & SLAB_RED_ZONE) {
2651 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2652 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2653 		}
2654 		/*
2655 		 * Constructors are not allowed to allocate memory from the same
2656 		 * cache which they are a constructor for.  Otherwise, deadlock.
2657 		 * They must also be threaded.
2658 		 */
2659 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2660 			cachep->ctor(objp + obj_offset(cachep));
2661 
2662 		if (cachep->flags & SLAB_RED_ZONE) {
2663 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2664 				slab_error(cachep, "constructor overwrote the"
2665 					   " end of an object");
2666 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2667 				slab_error(cachep, "constructor overwrote the"
2668 					   " start of an object");
2669 		}
2670 		if ((cachep->size % PAGE_SIZE) == 0 &&
2671 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2672 			kernel_map_pages(virt_to_page(objp),
2673 					 cachep->size / PAGE_SIZE, 0);
2674 #else
2675 		if (cachep->ctor)
2676 			cachep->ctor(objp);
2677 #endif
2678 		slab_bufctl(slabp)[i] = i + 1;
2679 	}
2680 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2681 }
2682 
2683 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2684 {
2685 	if (CONFIG_ZONE_DMA_FLAG) {
2686 		if (flags & GFP_DMA)
2687 			BUG_ON(!(cachep->allocflags & GFP_DMA));
2688 		else
2689 			BUG_ON(cachep->allocflags & GFP_DMA);
2690 	}
2691 }
2692 
2693 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2694 				int nodeid)
2695 {
2696 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2697 	kmem_bufctl_t next;
2698 
2699 	slabp->inuse++;
2700 	next = slab_bufctl(slabp)[slabp->free];
2701 #if DEBUG
2702 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2703 	WARN_ON(slabp->nodeid != nodeid);
2704 #endif
2705 	slabp->free = next;
2706 
2707 	return objp;
2708 }
2709 
2710 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2711 				void *objp, int nodeid)
2712 {
2713 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2714 
2715 #if DEBUG
2716 	/* Verify that the slab belongs to the intended node */
2717 	WARN_ON(slabp->nodeid != nodeid);
2718 
2719 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2720 		printk(KERN_ERR "slab: double free detected in cache "
2721 				"'%s', objp %p\n", cachep->name, objp);
2722 		BUG();
2723 	}
2724 #endif
2725 	slab_bufctl(slabp)[objnr] = slabp->free;
2726 	slabp->free = objnr;
2727 	slabp->inuse--;
2728 }
2729 
2730 /*
2731  * Map pages beginning at addr to the given cache and slab. This is required
2732  * for the slab allocator to be able to lookup the cache and slab of a
2733  * virtual address for kfree, ksize, and slab debugging.
2734  */
2735 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2736 			   void *addr)
2737 {
2738 	int nr_pages;
2739 	struct page *page;
2740 
2741 	page = virt_to_page(addr);
2742 
2743 	nr_pages = 1;
2744 	if (likely(!PageCompound(page)))
2745 		nr_pages <<= cache->gfporder;
2746 
2747 	do {
2748 		page->slab_cache = cache;
2749 		page->slab_page = slab;
2750 		page++;
2751 	} while (--nr_pages);
2752 }
2753 
2754 /*
2755  * Grow (by 1) the number of slabs within a cache.  This is called by
2756  * kmem_cache_alloc() when there are no active objs left in a cache.
2757  */
2758 static int cache_grow(struct kmem_cache *cachep,
2759 		gfp_t flags, int nodeid, void *objp)
2760 {
2761 	struct slab *slabp;
2762 	size_t offset;
2763 	gfp_t local_flags;
2764 	struct kmem_cache_node *n;
2765 
2766 	/*
2767 	 * Be lazy and only check for valid flags here,  keeping it out of the
2768 	 * critical path in kmem_cache_alloc().
2769 	 */
2770 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2771 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2772 
2773 	/* Take the node list lock to change the colour_next on this node */
2774 	check_irq_off();
2775 	n = cachep->node[nodeid];
2776 	spin_lock(&n->list_lock);
2777 
2778 	/* Get colour for the slab, and cal the next value. */
2779 	offset = n->colour_next;
2780 	n->colour_next++;
2781 	if (n->colour_next >= cachep->colour)
2782 		n->colour_next = 0;
2783 	spin_unlock(&n->list_lock);
2784 
2785 	offset *= cachep->colour_off;
2786 
2787 	if (local_flags & __GFP_WAIT)
2788 		local_irq_enable();
2789 
2790 	/*
2791 	 * The test for missing atomic flag is performed here, rather than
2792 	 * the more obvious place, simply to reduce the critical path length
2793 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2794 	 * will eventually be caught here (where it matters).
2795 	 */
2796 	kmem_flagcheck(cachep, flags);
2797 
2798 	/*
2799 	 * Get mem for the objs.  Attempt to allocate a physical page from
2800 	 * 'nodeid'.
2801 	 */
2802 	if (!objp)
2803 		objp = kmem_getpages(cachep, local_flags, nodeid);
2804 	if (!objp)
2805 		goto failed;
2806 
2807 	/* Get slab management. */
2808 	slabp = alloc_slabmgmt(cachep, objp, offset,
2809 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2810 	if (!slabp)
2811 		goto opps1;
2812 
2813 	slab_map_pages(cachep, slabp, objp);
2814 
2815 	cache_init_objs(cachep, slabp);
2816 
2817 	if (local_flags & __GFP_WAIT)
2818 		local_irq_disable();
2819 	check_irq_off();
2820 	spin_lock(&n->list_lock);
2821 
2822 	/* Make slab active. */
2823 	list_add_tail(&slabp->list, &(n->slabs_free));
2824 	STATS_INC_GROWN(cachep);
2825 	n->free_objects += cachep->num;
2826 	spin_unlock(&n->list_lock);
2827 	return 1;
2828 opps1:
2829 	kmem_freepages(cachep, objp);
2830 failed:
2831 	if (local_flags & __GFP_WAIT)
2832 		local_irq_disable();
2833 	return 0;
2834 }
2835 
2836 #if DEBUG
2837 
2838 /*
2839  * Perform extra freeing checks:
2840  * - detect bad pointers.
2841  * - POISON/RED_ZONE checking
2842  */
2843 static void kfree_debugcheck(const void *objp)
2844 {
2845 	if (!virt_addr_valid(objp)) {
2846 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2847 		       (unsigned long)objp);
2848 		BUG();
2849 	}
2850 }
2851 
2852 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2853 {
2854 	unsigned long long redzone1, redzone2;
2855 
2856 	redzone1 = *dbg_redzone1(cache, obj);
2857 	redzone2 = *dbg_redzone2(cache, obj);
2858 
2859 	/*
2860 	 * Redzone is ok.
2861 	 */
2862 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2863 		return;
2864 
2865 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2866 		slab_error(cache, "double free detected");
2867 	else
2868 		slab_error(cache, "memory outside object was overwritten");
2869 
2870 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2871 			obj, redzone1, redzone2);
2872 }
2873 
2874 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2875 				   unsigned long caller)
2876 {
2877 	struct page *page;
2878 	unsigned int objnr;
2879 	struct slab *slabp;
2880 
2881 	BUG_ON(virt_to_cache(objp) != cachep);
2882 
2883 	objp -= obj_offset(cachep);
2884 	kfree_debugcheck(objp);
2885 	page = virt_to_head_page(objp);
2886 
2887 	slabp = page->slab_page;
2888 
2889 	if (cachep->flags & SLAB_RED_ZONE) {
2890 		verify_redzone_free(cachep, objp);
2891 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2892 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2893 	}
2894 	if (cachep->flags & SLAB_STORE_USER)
2895 		*dbg_userword(cachep, objp) = (void *)caller;
2896 
2897 	objnr = obj_to_index(cachep, slabp, objp);
2898 
2899 	BUG_ON(objnr >= cachep->num);
2900 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2901 
2902 #ifdef CONFIG_DEBUG_SLAB_LEAK
2903 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2904 #endif
2905 	if (cachep->flags & SLAB_POISON) {
2906 #ifdef CONFIG_DEBUG_PAGEALLOC
2907 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2908 			store_stackinfo(cachep, objp, caller);
2909 			kernel_map_pages(virt_to_page(objp),
2910 					 cachep->size / PAGE_SIZE, 0);
2911 		} else {
2912 			poison_obj(cachep, objp, POISON_FREE);
2913 		}
2914 #else
2915 		poison_obj(cachep, objp, POISON_FREE);
2916 #endif
2917 	}
2918 	return objp;
2919 }
2920 
2921 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2922 {
2923 	kmem_bufctl_t i;
2924 	int entries = 0;
2925 
2926 	/* Check slab's freelist to see if this obj is there. */
2927 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2928 		entries++;
2929 		if (entries > cachep->num || i >= cachep->num)
2930 			goto bad;
2931 	}
2932 	if (entries != cachep->num - slabp->inuse) {
2933 bad:
2934 		printk(KERN_ERR "slab: Internal list corruption detected in "
2935 			"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
2936 			cachep->name, cachep->num, slabp, slabp->inuse,
2937 			print_tainted());
2938 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
2939 			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
2940 			1);
2941 		BUG();
2942 	}
2943 }
2944 #else
2945 #define kfree_debugcheck(x) do { } while(0)
2946 #define cache_free_debugcheck(x,objp,z) (objp)
2947 #define check_slabp(x,y) do { } while(0)
2948 #endif
2949 
2950 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2951 							bool force_refill)
2952 {
2953 	int batchcount;
2954 	struct kmem_cache_node *n;
2955 	struct array_cache *ac;
2956 	int node;
2957 
2958 	check_irq_off();
2959 	node = numa_mem_id();
2960 	if (unlikely(force_refill))
2961 		goto force_grow;
2962 retry:
2963 	ac = cpu_cache_get(cachep);
2964 	batchcount = ac->batchcount;
2965 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2966 		/*
2967 		 * If there was little recent activity on this cache, then
2968 		 * perform only a partial refill.  Otherwise we could generate
2969 		 * refill bouncing.
2970 		 */
2971 		batchcount = BATCHREFILL_LIMIT;
2972 	}
2973 	n = cachep->node[node];
2974 
2975 	BUG_ON(ac->avail > 0 || !n);
2976 	spin_lock(&n->list_lock);
2977 
2978 	/* See if we can refill from the shared array */
2979 	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2980 		n->shared->touched = 1;
2981 		goto alloc_done;
2982 	}
2983 
2984 	while (batchcount > 0) {
2985 		struct list_head *entry;
2986 		struct slab *slabp;
2987 		/* Get slab alloc is to come from. */
2988 		entry = n->slabs_partial.next;
2989 		if (entry == &n->slabs_partial) {
2990 			n->free_touched = 1;
2991 			entry = n->slabs_free.next;
2992 			if (entry == &n->slabs_free)
2993 				goto must_grow;
2994 		}
2995 
2996 		slabp = list_entry(entry, struct slab, list);
2997 		check_slabp(cachep, slabp);
2998 		check_spinlock_acquired(cachep);
2999 
3000 		/*
3001 		 * The slab was either on partial or free list so
3002 		 * there must be at least one object available for
3003 		 * allocation.
3004 		 */
3005 		BUG_ON(slabp->inuse >= cachep->num);
3006 
3007 		while (slabp->inuse < cachep->num && batchcount--) {
3008 			STATS_INC_ALLOCED(cachep);
3009 			STATS_INC_ACTIVE(cachep);
3010 			STATS_SET_HIGH(cachep);
3011 
3012 			ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
3013 									node));
3014 		}
3015 		check_slabp(cachep, slabp);
3016 
3017 		/* move slabp to correct slabp list: */
3018 		list_del(&slabp->list);
3019 		if (slabp->free == BUFCTL_END)
3020 			list_add(&slabp->list, &n->slabs_full);
3021 		else
3022 			list_add(&slabp->list, &n->slabs_partial);
3023 	}
3024 
3025 must_grow:
3026 	n->free_objects -= ac->avail;
3027 alloc_done:
3028 	spin_unlock(&n->list_lock);
3029 
3030 	if (unlikely(!ac->avail)) {
3031 		int x;
3032 force_grow:
3033 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3034 
3035 		/* cache_grow can reenable interrupts, then ac could change. */
3036 		ac = cpu_cache_get(cachep);
3037 		node = numa_mem_id();
3038 
3039 		/* no objects in sight? abort */
3040 		if (!x && (ac->avail == 0 || force_refill))
3041 			return NULL;
3042 
3043 		if (!ac->avail)		/* objects refilled by interrupt? */
3044 			goto retry;
3045 	}
3046 	ac->touched = 1;
3047 
3048 	return ac_get_obj(cachep, ac, flags, force_refill);
3049 }
3050 
3051 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3052 						gfp_t flags)
3053 {
3054 	might_sleep_if(flags & __GFP_WAIT);
3055 #if DEBUG
3056 	kmem_flagcheck(cachep, flags);
3057 #endif
3058 }
3059 
3060 #if DEBUG
3061 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3062 				gfp_t flags, void *objp, unsigned long caller)
3063 {
3064 	if (!objp)
3065 		return objp;
3066 	if (cachep->flags & SLAB_POISON) {
3067 #ifdef CONFIG_DEBUG_PAGEALLOC
3068 		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3069 			kernel_map_pages(virt_to_page(objp),
3070 					 cachep->size / PAGE_SIZE, 1);
3071 		else
3072 			check_poison_obj(cachep, objp);
3073 #else
3074 		check_poison_obj(cachep, objp);
3075 #endif
3076 		poison_obj(cachep, objp, POISON_INUSE);
3077 	}
3078 	if (cachep->flags & SLAB_STORE_USER)
3079 		*dbg_userword(cachep, objp) = (void *)caller;
3080 
3081 	if (cachep->flags & SLAB_RED_ZONE) {
3082 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3083 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3084 			slab_error(cachep, "double free, or memory outside"
3085 						" object was overwritten");
3086 			printk(KERN_ERR
3087 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3088 				objp, *dbg_redzone1(cachep, objp),
3089 				*dbg_redzone2(cachep, objp));
3090 		}
3091 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3092 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3093 	}
3094 #ifdef CONFIG_DEBUG_SLAB_LEAK
3095 	{
3096 		struct slab *slabp;
3097 		unsigned objnr;
3098 
3099 		slabp = virt_to_head_page(objp)->slab_page;
3100 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
3101 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3102 	}
3103 #endif
3104 	objp += obj_offset(cachep);
3105 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3106 		cachep->ctor(objp);
3107 	if (ARCH_SLAB_MINALIGN &&
3108 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3109 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3110 		       objp, (int)ARCH_SLAB_MINALIGN);
3111 	}
3112 	return objp;
3113 }
3114 #else
3115 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3116 #endif
3117 
3118 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3119 {
3120 	if (cachep == kmem_cache)
3121 		return false;
3122 
3123 	return should_failslab(cachep->object_size, flags, cachep->flags);
3124 }
3125 
3126 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3127 {
3128 	void *objp;
3129 	struct array_cache *ac;
3130 	bool force_refill = false;
3131 
3132 	check_irq_off();
3133 
3134 	ac = cpu_cache_get(cachep);
3135 	if (likely(ac->avail)) {
3136 		ac->touched = 1;
3137 		objp = ac_get_obj(cachep, ac, flags, false);
3138 
3139 		/*
3140 		 * Allow for the possibility all avail objects are not allowed
3141 		 * by the current flags
3142 		 */
3143 		if (objp) {
3144 			STATS_INC_ALLOCHIT(cachep);
3145 			goto out;
3146 		}
3147 		force_refill = true;
3148 	}
3149 
3150 	STATS_INC_ALLOCMISS(cachep);
3151 	objp = cache_alloc_refill(cachep, flags, force_refill);
3152 	/*
3153 	 * the 'ac' may be updated by cache_alloc_refill(),
3154 	 * and kmemleak_erase() requires its correct value.
3155 	 */
3156 	ac = cpu_cache_get(cachep);
3157 
3158 out:
3159 	/*
3160 	 * To avoid a false negative, if an object that is in one of the
3161 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3162 	 * treat the array pointers as a reference to the object.
3163 	 */
3164 	if (objp)
3165 		kmemleak_erase(&ac->entry[ac->avail]);
3166 	return objp;
3167 }
3168 
3169 #ifdef CONFIG_NUMA
3170 /*
3171  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3172  *
3173  * If we are in_interrupt, then process context, including cpusets and
3174  * mempolicy, may not apply and should not be used for allocation policy.
3175  */
3176 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3177 {
3178 	int nid_alloc, nid_here;
3179 
3180 	if (in_interrupt() || (flags & __GFP_THISNODE))
3181 		return NULL;
3182 	nid_alloc = nid_here = numa_mem_id();
3183 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3184 		nid_alloc = cpuset_slab_spread_node();
3185 	else if (current->mempolicy)
3186 		nid_alloc = slab_node();
3187 	if (nid_alloc != nid_here)
3188 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3189 	return NULL;
3190 }
3191 
3192 /*
3193  * Fallback function if there was no memory available and no objects on a
3194  * certain node and fall back is permitted. First we scan all the
3195  * available node for available objects. If that fails then we
3196  * perform an allocation without specifying a node. This allows the page
3197  * allocator to do its reclaim / fallback magic. We then insert the
3198  * slab into the proper nodelist and then allocate from it.
3199  */
3200 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3201 {
3202 	struct zonelist *zonelist;
3203 	gfp_t local_flags;
3204 	struct zoneref *z;
3205 	struct zone *zone;
3206 	enum zone_type high_zoneidx = gfp_zone(flags);
3207 	void *obj = NULL;
3208 	int nid;
3209 	unsigned int cpuset_mems_cookie;
3210 
3211 	if (flags & __GFP_THISNODE)
3212 		return NULL;
3213 
3214 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3215 
3216 retry_cpuset:
3217 	cpuset_mems_cookie = get_mems_allowed();
3218 	zonelist = node_zonelist(slab_node(), flags);
3219 
3220 retry:
3221 	/*
3222 	 * Look through allowed nodes for objects available
3223 	 * from existing per node queues.
3224 	 */
3225 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3226 		nid = zone_to_nid(zone);
3227 
3228 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3229 			cache->node[nid] &&
3230 			cache->node[nid]->free_objects) {
3231 				obj = ____cache_alloc_node(cache,
3232 					flags | GFP_THISNODE, nid);
3233 				if (obj)
3234 					break;
3235 		}
3236 	}
3237 
3238 	if (!obj) {
3239 		/*
3240 		 * This allocation will be performed within the constraints
3241 		 * of the current cpuset / memory policy requirements.
3242 		 * We may trigger various forms of reclaim on the allowed
3243 		 * set and go into memory reserves if necessary.
3244 		 */
3245 		if (local_flags & __GFP_WAIT)
3246 			local_irq_enable();
3247 		kmem_flagcheck(cache, flags);
3248 		obj = kmem_getpages(cache, local_flags, numa_mem_id());
3249 		if (local_flags & __GFP_WAIT)
3250 			local_irq_disable();
3251 		if (obj) {
3252 			/*
3253 			 * Insert into the appropriate per node queues
3254 			 */
3255 			nid = page_to_nid(virt_to_page(obj));
3256 			if (cache_grow(cache, flags, nid, obj)) {
3257 				obj = ____cache_alloc_node(cache,
3258 					flags | GFP_THISNODE, nid);
3259 				if (!obj)
3260 					/*
3261 					 * Another processor may allocate the
3262 					 * objects in the slab since we are
3263 					 * not holding any locks.
3264 					 */
3265 					goto retry;
3266 			} else {
3267 				/* cache_grow already freed obj */
3268 				obj = NULL;
3269 			}
3270 		}
3271 	}
3272 
3273 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
3274 		goto retry_cpuset;
3275 	return obj;
3276 }
3277 
3278 /*
3279  * A interface to enable slab creation on nodeid
3280  */
3281 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3282 				int nodeid)
3283 {
3284 	struct list_head *entry;
3285 	struct slab *slabp;
3286 	struct kmem_cache_node *n;
3287 	void *obj;
3288 	int x;
3289 
3290 	VM_BUG_ON(nodeid > num_online_nodes());
3291 	n = cachep->node[nodeid];
3292 	BUG_ON(!n);
3293 
3294 retry:
3295 	check_irq_off();
3296 	spin_lock(&n->list_lock);
3297 	entry = n->slabs_partial.next;
3298 	if (entry == &n->slabs_partial) {
3299 		n->free_touched = 1;
3300 		entry = n->slabs_free.next;
3301 		if (entry == &n->slabs_free)
3302 			goto must_grow;
3303 	}
3304 
3305 	slabp = list_entry(entry, struct slab, list);
3306 	check_spinlock_acquired_node(cachep, nodeid);
3307 	check_slabp(cachep, slabp);
3308 
3309 	STATS_INC_NODEALLOCS(cachep);
3310 	STATS_INC_ACTIVE(cachep);
3311 	STATS_SET_HIGH(cachep);
3312 
3313 	BUG_ON(slabp->inuse == cachep->num);
3314 
3315 	obj = slab_get_obj(cachep, slabp, nodeid);
3316 	check_slabp(cachep, slabp);
3317 	n->free_objects--;
3318 	/* move slabp to correct slabp list: */
3319 	list_del(&slabp->list);
3320 
3321 	if (slabp->free == BUFCTL_END)
3322 		list_add(&slabp->list, &n->slabs_full);
3323 	else
3324 		list_add(&slabp->list, &n->slabs_partial);
3325 
3326 	spin_unlock(&n->list_lock);
3327 	goto done;
3328 
3329 must_grow:
3330 	spin_unlock(&n->list_lock);
3331 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3332 	if (x)
3333 		goto retry;
3334 
3335 	return fallback_alloc(cachep, flags);
3336 
3337 done:
3338 	return obj;
3339 }
3340 
3341 /**
3342  * kmem_cache_alloc_node - Allocate an object on the specified node
3343  * @cachep: The cache to allocate from.
3344  * @flags: See kmalloc().
3345  * @nodeid: node number of the target node.
3346  * @caller: return address of caller, used for debug information
3347  *
3348  * Identical to kmem_cache_alloc but it will allocate memory on the given
3349  * node, which can improve the performance for cpu bound structures.
3350  *
3351  * Fallback to other node is possible if __GFP_THISNODE is not set.
3352  */
3353 static __always_inline void *
3354 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3355 		   unsigned long caller)
3356 {
3357 	unsigned long save_flags;
3358 	void *ptr;
3359 	int slab_node = numa_mem_id();
3360 
3361 	flags &= gfp_allowed_mask;
3362 
3363 	lockdep_trace_alloc(flags);
3364 
3365 	if (slab_should_failslab(cachep, flags))
3366 		return NULL;
3367 
3368 	cachep = memcg_kmem_get_cache(cachep, flags);
3369 
3370 	cache_alloc_debugcheck_before(cachep, flags);
3371 	local_irq_save(save_flags);
3372 
3373 	if (nodeid == NUMA_NO_NODE)
3374 		nodeid = slab_node;
3375 
3376 	if (unlikely(!cachep->node[nodeid])) {
3377 		/* Node not bootstrapped yet */
3378 		ptr = fallback_alloc(cachep, flags);
3379 		goto out;
3380 	}
3381 
3382 	if (nodeid == slab_node) {
3383 		/*
3384 		 * Use the locally cached objects if possible.
3385 		 * However ____cache_alloc does not allow fallback
3386 		 * to other nodes. It may fail while we still have
3387 		 * objects on other nodes available.
3388 		 */
3389 		ptr = ____cache_alloc(cachep, flags);
3390 		if (ptr)
3391 			goto out;
3392 	}
3393 	/* ___cache_alloc_node can fall back to other nodes */
3394 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3395   out:
3396 	local_irq_restore(save_flags);
3397 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3398 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3399 				 flags);
3400 
3401 	if (likely(ptr))
3402 		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3403 
3404 	if (unlikely((flags & __GFP_ZERO) && ptr))
3405 		memset(ptr, 0, cachep->object_size);
3406 
3407 	return ptr;
3408 }
3409 
3410 static __always_inline void *
3411 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3412 {
3413 	void *objp;
3414 
3415 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3416 		objp = alternate_node_alloc(cache, flags);
3417 		if (objp)
3418 			goto out;
3419 	}
3420 	objp = ____cache_alloc(cache, flags);
3421 
3422 	/*
3423 	 * We may just have run out of memory on the local node.
3424 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3425 	 */
3426 	if (!objp)
3427 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3428 
3429   out:
3430 	return objp;
3431 }
3432 #else
3433 
3434 static __always_inline void *
3435 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3436 {
3437 	return ____cache_alloc(cachep, flags);
3438 }
3439 
3440 #endif /* CONFIG_NUMA */
3441 
3442 static __always_inline void *
3443 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3444 {
3445 	unsigned long save_flags;
3446 	void *objp;
3447 
3448 	flags &= gfp_allowed_mask;
3449 
3450 	lockdep_trace_alloc(flags);
3451 
3452 	if (slab_should_failslab(cachep, flags))
3453 		return NULL;
3454 
3455 	cachep = memcg_kmem_get_cache(cachep, flags);
3456 
3457 	cache_alloc_debugcheck_before(cachep, flags);
3458 	local_irq_save(save_flags);
3459 	objp = __do_cache_alloc(cachep, flags);
3460 	local_irq_restore(save_flags);
3461 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3462 	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3463 				 flags);
3464 	prefetchw(objp);
3465 
3466 	if (likely(objp))
3467 		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3468 
3469 	if (unlikely((flags & __GFP_ZERO) && objp))
3470 		memset(objp, 0, cachep->object_size);
3471 
3472 	return objp;
3473 }
3474 
3475 /*
3476  * Caller needs to acquire correct kmem_list's list_lock
3477  */
3478 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3479 		       int node)
3480 {
3481 	int i;
3482 	struct kmem_cache_node *n;
3483 
3484 	for (i = 0; i < nr_objects; i++) {
3485 		void *objp;
3486 		struct slab *slabp;
3487 
3488 		clear_obj_pfmemalloc(&objpp[i]);
3489 		objp = objpp[i];
3490 
3491 		slabp = virt_to_slab(objp);
3492 		n = cachep->node[node];
3493 		list_del(&slabp->list);
3494 		check_spinlock_acquired_node(cachep, node);
3495 		check_slabp(cachep, slabp);
3496 		slab_put_obj(cachep, slabp, objp, node);
3497 		STATS_DEC_ACTIVE(cachep);
3498 		n->free_objects++;
3499 		check_slabp(cachep, slabp);
3500 
3501 		/* fixup slab chains */
3502 		if (slabp->inuse == 0) {
3503 			if (n->free_objects > n->free_limit) {
3504 				n->free_objects -= cachep->num;
3505 				/* No need to drop any previously held
3506 				 * lock here, even if we have a off-slab slab
3507 				 * descriptor it is guaranteed to come from
3508 				 * a different cache, refer to comments before
3509 				 * alloc_slabmgmt.
3510 				 */
3511 				slab_destroy(cachep, slabp);
3512 			} else {
3513 				list_add(&slabp->list, &n->slabs_free);
3514 			}
3515 		} else {
3516 			/* Unconditionally move a slab to the end of the
3517 			 * partial list on free - maximum time for the
3518 			 * other objects to be freed, too.
3519 			 */
3520 			list_add_tail(&slabp->list, &n->slabs_partial);
3521 		}
3522 	}
3523 }
3524 
3525 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3526 {
3527 	int batchcount;
3528 	struct kmem_cache_node *n;
3529 	int node = numa_mem_id();
3530 
3531 	batchcount = ac->batchcount;
3532 #if DEBUG
3533 	BUG_ON(!batchcount || batchcount > ac->avail);
3534 #endif
3535 	check_irq_off();
3536 	n = cachep->node[node];
3537 	spin_lock(&n->list_lock);
3538 	if (n->shared) {
3539 		struct array_cache *shared_array = n->shared;
3540 		int max = shared_array->limit - shared_array->avail;
3541 		if (max) {
3542 			if (batchcount > max)
3543 				batchcount = max;
3544 			memcpy(&(shared_array->entry[shared_array->avail]),
3545 			       ac->entry, sizeof(void *) * batchcount);
3546 			shared_array->avail += batchcount;
3547 			goto free_done;
3548 		}
3549 	}
3550 
3551 	free_block(cachep, ac->entry, batchcount, node);
3552 free_done:
3553 #if STATS
3554 	{
3555 		int i = 0;
3556 		struct list_head *p;
3557 
3558 		p = n->slabs_free.next;
3559 		while (p != &(n->slabs_free)) {
3560 			struct slab *slabp;
3561 
3562 			slabp = list_entry(p, struct slab, list);
3563 			BUG_ON(slabp->inuse);
3564 
3565 			i++;
3566 			p = p->next;
3567 		}
3568 		STATS_SET_FREEABLE(cachep, i);
3569 	}
3570 #endif
3571 	spin_unlock(&n->list_lock);
3572 	ac->avail -= batchcount;
3573 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3574 }
3575 
3576 /*
3577  * Release an obj back to its cache. If the obj has a constructed state, it must
3578  * be in this state _before_ it is released.  Called with disabled ints.
3579  */
3580 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3581 				unsigned long caller)
3582 {
3583 	struct array_cache *ac = cpu_cache_get(cachep);
3584 
3585 	check_irq_off();
3586 	kmemleak_free_recursive(objp, cachep->flags);
3587 	objp = cache_free_debugcheck(cachep, objp, caller);
3588 
3589 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3590 
3591 	/*
3592 	 * Skip calling cache_free_alien() when the platform is not numa.
3593 	 * This will avoid cache misses that happen while accessing slabp (which
3594 	 * is per page memory  reference) to get nodeid. Instead use a global
3595 	 * variable to skip the call, which is mostly likely to be present in
3596 	 * the cache.
3597 	 */
3598 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3599 		return;
3600 
3601 	if (likely(ac->avail < ac->limit)) {
3602 		STATS_INC_FREEHIT(cachep);
3603 	} else {
3604 		STATS_INC_FREEMISS(cachep);
3605 		cache_flusharray(cachep, ac);
3606 	}
3607 
3608 	ac_put_obj(cachep, ac, objp);
3609 }
3610 
3611 /**
3612  * kmem_cache_alloc - Allocate an object
3613  * @cachep: The cache to allocate from.
3614  * @flags: See kmalloc().
3615  *
3616  * Allocate an object from this cache.  The flags are only relevant
3617  * if the cache has no available objects.
3618  */
3619 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3620 {
3621 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3622 
3623 	trace_kmem_cache_alloc(_RET_IP_, ret,
3624 			       cachep->object_size, cachep->size, flags);
3625 
3626 	return ret;
3627 }
3628 EXPORT_SYMBOL(kmem_cache_alloc);
3629 
3630 #ifdef CONFIG_TRACING
3631 void *
3632 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3633 {
3634 	void *ret;
3635 
3636 	ret = slab_alloc(cachep, flags, _RET_IP_);
3637 
3638 	trace_kmalloc(_RET_IP_, ret,
3639 		      size, cachep->size, flags);
3640 	return ret;
3641 }
3642 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3643 #endif
3644 
3645 #ifdef CONFIG_NUMA
3646 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3647 {
3648 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3649 
3650 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3651 				    cachep->object_size, cachep->size,
3652 				    flags, nodeid);
3653 
3654 	return ret;
3655 }
3656 EXPORT_SYMBOL(kmem_cache_alloc_node);
3657 
3658 #ifdef CONFIG_TRACING
3659 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3660 				  gfp_t flags,
3661 				  int nodeid,
3662 				  size_t size)
3663 {
3664 	void *ret;
3665 
3666 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3667 
3668 	trace_kmalloc_node(_RET_IP_, ret,
3669 			   size, cachep->size,
3670 			   flags, nodeid);
3671 	return ret;
3672 }
3673 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3674 #endif
3675 
3676 static __always_inline void *
3677 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3678 {
3679 	struct kmem_cache *cachep;
3680 
3681 	cachep = kmalloc_slab(size, flags);
3682 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3683 		return cachep;
3684 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3685 }
3686 
3687 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3688 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3689 {
3690 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3691 }
3692 EXPORT_SYMBOL(__kmalloc_node);
3693 
3694 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3695 		int node, unsigned long caller)
3696 {
3697 	return __do_kmalloc_node(size, flags, node, caller);
3698 }
3699 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3700 #else
3701 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3702 {
3703 	return __do_kmalloc_node(size, flags, node, 0);
3704 }
3705 EXPORT_SYMBOL(__kmalloc_node);
3706 #endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3707 #endif /* CONFIG_NUMA */
3708 
3709 /**
3710  * __do_kmalloc - allocate memory
3711  * @size: how many bytes of memory are required.
3712  * @flags: the type of memory to allocate (see kmalloc).
3713  * @caller: function caller for debug tracking of the caller
3714  */
3715 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3716 					  unsigned long caller)
3717 {
3718 	struct kmem_cache *cachep;
3719 	void *ret;
3720 
3721 	/* If you want to save a few bytes .text space: replace
3722 	 * __ with kmem_.
3723 	 * Then kmalloc uses the uninlined functions instead of the inline
3724 	 * functions.
3725 	 */
3726 	cachep = kmalloc_slab(size, flags);
3727 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3728 		return cachep;
3729 	ret = slab_alloc(cachep, flags, caller);
3730 
3731 	trace_kmalloc(caller, ret,
3732 		      size, cachep->size, flags);
3733 
3734 	return ret;
3735 }
3736 
3737 
3738 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3739 void *__kmalloc(size_t size, gfp_t flags)
3740 {
3741 	return __do_kmalloc(size, flags, _RET_IP_);
3742 }
3743 EXPORT_SYMBOL(__kmalloc);
3744 
3745 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3746 {
3747 	return __do_kmalloc(size, flags, caller);
3748 }
3749 EXPORT_SYMBOL(__kmalloc_track_caller);
3750 
3751 #else
3752 void *__kmalloc(size_t size, gfp_t flags)
3753 {
3754 	return __do_kmalloc(size, flags, 0);
3755 }
3756 EXPORT_SYMBOL(__kmalloc);
3757 #endif
3758 
3759 /**
3760  * kmem_cache_free - Deallocate an object
3761  * @cachep: The cache the allocation was from.
3762  * @objp: The previously allocated object.
3763  *
3764  * Free an object which was previously allocated from this
3765  * cache.
3766  */
3767 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3768 {
3769 	unsigned long flags;
3770 	cachep = cache_from_obj(cachep, objp);
3771 	if (!cachep)
3772 		return;
3773 
3774 	local_irq_save(flags);
3775 	debug_check_no_locks_freed(objp, cachep->object_size);
3776 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3777 		debug_check_no_obj_freed(objp, cachep->object_size);
3778 	__cache_free(cachep, objp, _RET_IP_);
3779 	local_irq_restore(flags);
3780 
3781 	trace_kmem_cache_free(_RET_IP_, objp);
3782 }
3783 EXPORT_SYMBOL(kmem_cache_free);
3784 
3785 /**
3786  * kfree - free previously allocated memory
3787  * @objp: pointer returned by kmalloc.
3788  *
3789  * If @objp is NULL, no operation is performed.
3790  *
3791  * Don't free memory not originally allocated by kmalloc()
3792  * or you will run into trouble.
3793  */
3794 void kfree(const void *objp)
3795 {
3796 	struct kmem_cache *c;
3797 	unsigned long flags;
3798 
3799 	trace_kfree(_RET_IP_, objp);
3800 
3801 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3802 		return;
3803 	local_irq_save(flags);
3804 	kfree_debugcheck(objp);
3805 	c = virt_to_cache(objp);
3806 	debug_check_no_locks_freed(objp, c->object_size);
3807 
3808 	debug_check_no_obj_freed(objp, c->object_size);
3809 	__cache_free(c, (void *)objp, _RET_IP_);
3810 	local_irq_restore(flags);
3811 }
3812 EXPORT_SYMBOL(kfree);
3813 
3814 /*
3815  * This initializes kmem_cache_node or resizes various caches for all nodes.
3816  */
3817 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3818 {
3819 	int node;
3820 	struct kmem_cache_node *n;
3821 	struct array_cache *new_shared;
3822 	struct array_cache **new_alien = NULL;
3823 
3824 	for_each_online_node(node) {
3825 
3826                 if (use_alien_caches) {
3827                         new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3828                         if (!new_alien)
3829                                 goto fail;
3830                 }
3831 
3832 		new_shared = NULL;
3833 		if (cachep->shared) {
3834 			new_shared = alloc_arraycache(node,
3835 				cachep->shared*cachep->batchcount,
3836 					0xbaadf00d, gfp);
3837 			if (!new_shared) {
3838 				free_alien_cache(new_alien);
3839 				goto fail;
3840 			}
3841 		}
3842 
3843 		n = cachep->node[node];
3844 		if (n) {
3845 			struct array_cache *shared = n->shared;
3846 
3847 			spin_lock_irq(&n->list_lock);
3848 
3849 			if (shared)
3850 				free_block(cachep, shared->entry,
3851 						shared->avail, node);
3852 
3853 			n->shared = new_shared;
3854 			if (!n->alien) {
3855 				n->alien = new_alien;
3856 				new_alien = NULL;
3857 			}
3858 			n->free_limit = (1 + nr_cpus_node(node)) *
3859 					cachep->batchcount + cachep->num;
3860 			spin_unlock_irq(&n->list_lock);
3861 			kfree(shared);
3862 			free_alien_cache(new_alien);
3863 			continue;
3864 		}
3865 		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3866 		if (!n) {
3867 			free_alien_cache(new_alien);
3868 			kfree(new_shared);
3869 			goto fail;
3870 		}
3871 
3872 		kmem_cache_node_init(n);
3873 		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3874 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3875 		n->shared = new_shared;
3876 		n->alien = new_alien;
3877 		n->free_limit = (1 + nr_cpus_node(node)) *
3878 					cachep->batchcount + cachep->num;
3879 		cachep->node[node] = n;
3880 	}
3881 	return 0;
3882 
3883 fail:
3884 	if (!cachep->list.next) {
3885 		/* Cache is not active yet. Roll back what we did */
3886 		node--;
3887 		while (node >= 0) {
3888 			if (cachep->node[node]) {
3889 				n = cachep->node[node];
3890 
3891 				kfree(n->shared);
3892 				free_alien_cache(n->alien);
3893 				kfree(n);
3894 				cachep->node[node] = NULL;
3895 			}
3896 			node--;
3897 		}
3898 	}
3899 	return -ENOMEM;
3900 }
3901 
3902 struct ccupdate_struct {
3903 	struct kmem_cache *cachep;
3904 	struct array_cache *new[0];
3905 };
3906 
3907 static void do_ccupdate_local(void *info)
3908 {
3909 	struct ccupdate_struct *new = info;
3910 	struct array_cache *old;
3911 
3912 	check_irq_off();
3913 	old = cpu_cache_get(new->cachep);
3914 
3915 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3916 	new->new[smp_processor_id()] = old;
3917 }
3918 
3919 /* Always called with the slab_mutex held */
3920 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3921 				int batchcount, int shared, gfp_t gfp)
3922 {
3923 	struct ccupdate_struct *new;
3924 	int i;
3925 
3926 	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
3927 		      gfp);
3928 	if (!new)
3929 		return -ENOMEM;
3930 
3931 	for_each_online_cpu(i) {
3932 		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
3933 						batchcount, gfp);
3934 		if (!new->new[i]) {
3935 			for (i--; i >= 0; i--)
3936 				kfree(new->new[i]);
3937 			kfree(new);
3938 			return -ENOMEM;
3939 		}
3940 	}
3941 	new->cachep = cachep;
3942 
3943 	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3944 
3945 	check_irq_on();
3946 	cachep->batchcount = batchcount;
3947 	cachep->limit = limit;
3948 	cachep->shared = shared;
3949 
3950 	for_each_online_cpu(i) {
3951 		struct array_cache *ccold = new->new[i];
3952 		if (!ccold)
3953 			continue;
3954 		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3955 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
3956 		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3957 		kfree(ccold);
3958 	}
3959 	kfree(new);
3960 	return alloc_kmemlist(cachep, gfp);
3961 }
3962 
3963 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3964 				int batchcount, int shared, gfp_t gfp)
3965 {
3966 	int ret;
3967 	struct kmem_cache *c = NULL;
3968 	int i = 0;
3969 
3970 	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3971 
3972 	if (slab_state < FULL)
3973 		return ret;
3974 
3975 	if ((ret < 0) || !is_root_cache(cachep))
3976 		return ret;
3977 
3978 	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
3979 	for_each_memcg_cache_index(i) {
3980 		c = cache_from_memcg(cachep, i);
3981 		if (c)
3982 			/* return value determined by the parent cache only */
3983 			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3984 	}
3985 
3986 	return ret;
3987 }
3988 
3989 /* Called with slab_mutex held always */
3990 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3991 {
3992 	int err;
3993 	int limit = 0;
3994 	int shared = 0;
3995 	int batchcount = 0;
3996 
3997 	if (!is_root_cache(cachep)) {
3998 		struct kmem_cache *root = memcg_root_cache(cachep);
3999 		limit = root->limit;
4000 		shared = root->shared;
4001 		batchcount = root->batchcount;
4002 	}
4003 
4004 	if (limit && shared && batchcount)
4005 		goto skip_setup;
4006 	/*
4007 	 * The head array serves three purposes:
4008 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4009 	 * - reduce the number of spinlock operations.
4010 	 * - reduce the number of linked list operations on the slab and
4011 	 *   bufctl chains: array operations are cheaper.
4012 	 * The numbers are guessed, we should auto-tune as described by
4013 	 * Bonwick.
4014 	 */
4015 	if (cachep->size > 131072)
4016 		limit = 1;
4017 	else if (cachep->size > PAGE_SIZE)
4018 		limit = 8;
4019 	else if (cachep->size > 1024)
4020 		limit = 24;
4021 	else if (cachep->size > 256)
4022 		limit = 54;
4023 	else
4024 		limit = 120;
4025 
4026 	/*
4027 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
4028 	 * allocation behaviour: Most allocs on one cpu, most free operations
4029 	 * on another cpu. For these cases, an efficient object passing between
4030 	 * cpus is necessary. This is provided by a shared array. The array
4031 	 * replaces Bonwick's magazine layer.
4032 	 * On uniprocessor, it's functionally equivalent (but less efficient)
4033 	 * to a larger limit. Thus disabled by default.
4034 	 */
4035 	shared = 0;
4036 	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
4037 		shared = 8;
4038 
4039 #if DEBUG
4040 	/*
4041 	 * With debugging enabled, large batchcount lead to excessively long
4042 	 * periods with disabled local interrupts. Limit the batchcount
4043 	 */
4044 	if (limit > 32)
4045 		limit = 32;
4046 #endif
4047 	batchcount = (limit + 1) / 2;
4048 skip_setup:
4049 	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4050 	if (err)
4051 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4052 		       cachep->name, -err);
4053 	return err;
4054 }
4055 
4056 /*
4057  * Drain an array if it contains any elements taking the node lock only if
4058  * necessary. Note that the node listlock also protects the array_cache
4059  * if drain_array() is used on the shared array.
4060  */
4061 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4062 			 struct array_cache *ac, int force, int node)
4063 {
4064 	int tofree;
4065 
4066 	if (!ac || !ac->avail)
4067 		return;
4068 	if (ac->touched && !force) {
4069 		ac->touched = 0;
4070 	} else {
4071 		spin_lock_irq(&n->list_lock);
4072 		if (ac->avail) {
4073 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4074 			if (tofree > ac->avail)
4075 				tofree = (ac->avail + 1) / 2;
4076 			free_block(cachep, ac->entry, tofree, node);
4077 			ac->avail -= tofree;
4078 			memmove(ac->entry, &(ac->entry[tofree]),
4079 				sizeof(void *) * ac->avail);
4080 		}
4081 		spin_unlock_irq(&n->list_lock);
4082 	}
4083 }
4084 
4085 /**
4086  * cache_reap - Reclaim memory from caches.
4087  * @w: work descriptor
4088  *
4089  * Called from workqueue/eventd every few seconds.
4090  * Purpose:
4091  * - clear the per-cpu caches for this CPU.
4092  * - return freeable pages to the main free memory pool.
4093  *
4094  * If we cannot acquire the cache chain mutex then just give up - we'll try
4095  * again on the next iteration.
4096  */
4097 static void cache_reap(struct work_struct *w)
4098 {
4099 	struct kmem_cache *searchp;
4100 	struct kmem_cache_node *n;
4101 	int node = numa_mem_id();
4102 	struct delayed_work *work = to_delayed_work(w);
4103 
4104 	if (!mutex_trylock(&slab_mutex))
4105 		/* Give up. Setup the next iteration. */
4106 		goto out;
4107 
4108 	list_for_each_entry(searchp, &slab_caches, list) {
4109 		check_irq_on();
4110 
4111 		/*
4112 		 * We only take the node lock if absolutely necessary and we
4113 		 * have established with reasonable certainty that
4114 		 * we can do some work if the lock was obtained.
4115 		 */
4116 		n = searchp->node[node];
4117 
4118 		reap_alien(searchp, n);
4119 
4120 		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
4121 
4122 		/*
4123 		 * These are racy checks but it does not matter
4124 		 * if we skip one check or scan twice.
4125 		 */
4126 		if (time_after(n->next_reap, jiffies))
4127 			goto next;
4128 
4129 		n->next_reap = jiffies + REAPTIMEOUT_LIST3;
4130 
4131 		drain_array(searchp, n, n->shared, 0, node);
4132 
4133 		if (n->free_touched)
4134 			n->free_touched = 0;
4135 		else {
4136 			int freed;
4137 
4138 			freed = drain_freelist(searchp, n, (n->free_limit +
4139 				5 * searchp->num - 1) / (5 * searchp->num));
4140 			STATS_ADD_REAPED(searchp, freed);
4141 		}
4142 next:
4143 		cond_resched();
4144 	}
4145 	check_irq_on();
4146 	mutex_unlock(&slab_mutex);
4147 	next_reap_node();
4148 out:
4149 	/* Set up the next iteration */
4150 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4151 }
4152 
4153 #ifdef CONFIG_SLABINFO
4154 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4155 {
4156 	struct slab *slabp;
4157 	unsigned long active_objs;
4158 	unsigned long num_objs;
4159 	unsigned long active_slabs = 0;
4160 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4161 	const char *name;
4162 	char *error = NULL;
4163 	int node;
4164 	struct kmem_cache_node *n;
4165 
4166 	active_objs = 0;
4167 	num_slabs = 0;
4168 	for_each_online_node(node) {
4169 		n = cachep->node[node];
4170 		if (!n)
4171 			continue;
4172 
4173 		check_irq_on();
4174 		spin_lock_irq(&n->list_lock);
4175 
4176 		list_for_each_entry(slabp, &n->slabs_full, list) {
4177 			if (slabp->inuse != cachep->num && !error)
4178 				error = "slabs_full accounting error";
4179 			active_objs += cachep->num;
4180 			active_slabs++;
4181 		}
4182 		list_for_each_entry(slabp, &n->slabs_partial, list) {
4183 			if (slabp->inuse == cachep->num && !error)
4184 				error = "slabs_partial inuse accounting error";
4185 			if (!slabp->inuse && !error)
4186 				error = "slabs_partial/inuse accounting error";
4187 			active_objs += slabp->inuse;
4188 			active_slabs++;
4189 		}
4190 		list_for_each_entry(slabp, &n->slabs_free, list) {
4191 			if (slabp->inuse && !error)
4192 				error = "slabs_free/inuse accounting error";
4193 			num_slabs++;
4194 		}
4195 		free_objects += n->free_objects;
4196 		if (n->shared)
4197 			shared_avail += n->shared->avail;
4198 
4199 		spin_unlock_irq(&n->list_lock);
4200 	}
4201 	num_slabs += active_slabs;
4202 	num_objs = num_slabs * cachep->num;
4203 	if (num_objs - active_objs != free_objects && !error)
4204 		error = "free_objects accounting error";
4205 
4206 	name = cachep->name;
4207 	if (error)
4208 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4209 
4210 	sinfo->active_objs = active_objs;
4211 	sinfo->num_objs = num_objs;
4212 	sinfo->active_slabs = active_slabs;
4213 	sinfo->num_slabs = num_slabs;
4214 	sinfo->shared_avail = shared_avail;
4215 	sinfo->limit = cachep->limit;
4216 	sinfo->batchcount = cachep->batchcount;
4217 	sinfo->shared = cachep->shared;
4218 	sinfo->objects_per_slab = cachep->num;
4219 	sinfo->cache_order = cachep->gfporder;
4220 }
4221 
4222 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4223 {
4224 #if STATS
4225 	{			/* node stats */
4226 		unsigned long high = cachep->high_mark;
4227 		unsigned long allocs = cachep->num_allocations;
4228 		unsigned long grown = cachep->grown;
4229 		unsigned long reaped = cachep->reaped;
4230 		unsigned long errors = cachep->errors;
4231 		unsigned long max_freeable = cachep->max_freeable;
4232 		unsigned long node_allocs = cachep->node_allocs;
4233 		unsigned long node_frees = cachep->node_frees;
4234 		unsigned long overflows = cachep->node_overflow;
4235 
4236 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4237 			   "%4lu %4lu %4lu %4lu %4lu",
4238 			   allocs, high, grown,
4239 			   reaped, errors, max_freeable, node_allocs,
4240 			   node_frees, overflows);
4241 	}
4242 	/* cpu stats */
4243 	{
4244 		unsigned long allochit = atomic_read(&cachep->allochit);
4245 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4246 		unsigned long freehit = atomic_read(&cachep->freehit);
4247 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4248 
4249 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4250 			   allochit, allocmiss, freehit, freemiss);
4251 	}
4252 #endif
4253 }
4254 
4255 #define MAX_SLABINFO_WRITE 128
4256 /**
4257  * slabinfo_write - Tuning for the slab allocator
4258  * @file: unused
4259  * @buffer: user buffer
4260  * @count: data length
4261  * @ppos: unused
4262  */
4263 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4264 		       size_t count, loff_t *ppos)
4265 {
4266 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4267 	int limit, batchcount, shared, res;
4268 	struct kmem_cache *cachep;
4269 
4270 	if (count > MAX_SLABINFO_WRITE)
4271 		return -EINVAL;
4272 	if (copy_from_user(&kbuf, buffer, count))
4273 		return -EFAULT;
4274 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4275 
4276 	tmp = strchr(kbuf, ' ');
4277 	if (!tmp)
4278 		return -EINVAL;
4279 	*tmp = '\0';
4280 	tmp++;
4281 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4282 		return -EINVAL;
4283 
4284 	/* Find the cache in the chain of caches. */
4285 	mutex_lock(&slab_mutex);
4286 	res = -EINVAL;
4287 	list_for_each_entry(cachep, &slab_caches, list) {
4288 		if (!strcmp(cachep->name, kbuf)) {
4289 			if (limit < 1 || batchcount < 1 ||
4290 					batchcount > limit || shared < 0) {
4291 				res = 0;
4292 			} else {
4293 				res = do_tune_cpucache(cachep, limit,
4294 						       batchcount, shared,
4295 						       GFP_KERNEL);
4296 			}
4297 			break;
4298 		}
4299 	}
4300 	mutex_unlock(&slab_mutex);
4301 	if (res >= 0)
4302 		res = count;
4303 	return res;
4304 }
4305 
4306 #ifdef CONFIG_DEBUG_SLAB_LEAK
4307 
4308 static void *leaks_start(struct seq_file *m, loff_t *pos)
4309 {
4310 	mutex_lock(&slab_mutex);
4311 	return seq_list_start(&slab_caches, *pos);
4312 }
4313 
4314 static inline int add_caller(unsigned long *n, unsigned long v)
4315 {
4316 	unsigned long *p;
4317 	int l;
4318 	if (!v)
4319 		return 1;
4320 	l = n[1];
4321 	p = n + 2;
4322 	while (l) {
4323 		int i = l/2;
4324 		unsigned long *q = p + 2 * i;
4325 		if (*q == v) {
4326 			q[1]++;
4327 			return 1;
4328 		}
4329 		if (*q > v) {
4330 			l = i;
4331 		} else {
4332 			p = q + 2;
4333 			l -= i + 1;
4334 		}
4335 	}
4336 	if (++n[1] == n[0])
4337 		return 0;
4338 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4339 	p[0] = v;
4340 	p[1] = 1;
4341 	return 1;
4342 }
4343 
4344 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4345 {
4346 	void *p;
4347 	int i;
4348 	if (n[0] == n[1])
4349 		return;
4350 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
4351 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4352 			continue;
4353 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4354 			return;
4355 	}
4356 }
4357 
4358 static void show_symbol(struct seq_file *m, unsigned long address)
4359 {
4360 #ifdef CONFIG_KALLSYMS
4361 	unsigned long offset, size;
4362 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4363 
4364 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4365 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4366 		if (modname[0])
4367 			seq_printf(m, " [%s]", modname);
4368 		return;
4369 	}
4370 #endif
4371 	seq_printf(m, "%p", (void *)address);
4372 }
4373 
4374 static int leaks_show(struct seq_file *m, void *p)
4375 {
4376 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4377 	struct slab *slabp;
4378 	struct kmem_cache_node *n;
4379 	const char *name;
4380 	unsigned long *x = m->private;
4381 	int node;
4382 	int i;
4383 
4384 	if (!(cachep->flags & SLAB_STORE_USER))
4385 		return 0;
4386 	if (!(cachep->flags & SLAB_RED_ZONE))
4387 		return 0;
4388 
4389 	/* OK, we can do it */
4390 
4391 	x[1] = 0;
4392 
4393 	for_each_online_node(node) {
4394 		n = cachep->node[node];
4395 		if (!n)
4396 			continue;
4397 
4398 		check_irq_on();
4399 		spin_lock_irq(&n->list_lock);
4400 
4401 		list_for_each_entry(slabp, &n->slabs_full, list)
4402 			handle_slab(x, cachep, slabp);
4403 		list_for_each_entry(slabp, &n->slabs_partial, list)
4404 			handle_slab(x, cachep, slabp);
4405 		spin_unlock_irq(&n->list_lock);
4406 	}
4407 	name = cachep->name;
4408 	if (x[0] == x[1]) {
4409 		/* Increase the buffer size */
4410 		mutex_unlock(&slab_mutex);
4411 		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4412 		if (!m->private) {
4413 			/* Too bad, we are really out */
4414 			m->private = x;
4415 			mutex_lock(&slab_mutex);
4416 			return -ENOMEM;
4417 		}
4418 		*(unsigned long *)m->private = x[0] * 2;
4419 		kfree(x);
4420 		mutex_lock(&slab_mutex);
4421 		/* Now make sure this entry will be retried */
4422 		m->count = m->size;
4423 		return 0;
4424 	}
4425 	for (i = 0; i < x[1]; i++) {
4426 		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4427 		show_symbol(m, x[2*i+2]);
4428 		seq_putc(m, '\n');
4429 	}
4430 
4431 	return 0;
4432 }
4433 
4434 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4435 {
4436 	return seq_list_next(p, &slab_caches, pos);
4437 }
4438 
4439 static void s_stop(struct seq_file *m, void *p)
4440 {
4441 	mutex_unlock(&slab_mutex);
4442 }
4443 
4444 static const struct seq_operations slabstats_op = {
4445 	.start = leaks_start,
4446 	.next = s_next,
4447 	.stop = s_stop,
4448 	.show = leaks_show,
4449 };
4450 
4451 static int slabstats_open(struct inode *inode, struct file *file)
4452 {
4453 	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4454 	int ret = -ENOMEM;
4455 	if (n) {
4456 		ret = seq_open(file, &slabstats_op);
4457 		if (!ret) {
4458 			struct seq_file *m = file->private_data;
4459 			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4460 			m->private = n;
4461 			n = NULL;
4462 		}
4463 		kfree(n);
4464 	}
4465 	return ret;
4466 }
4467 
4468 static const struct file_operations proc_slabstats_operations = {
4469 	.open		= slabstats_open,
4470 	.read		= seq_read,
4471 	.llseek		= seq_lseek,
4472 	.release	= seq_release_private,
4473 };
4474 #endif
4475 
4476 static int __init slab_proc_init(void)
4477 {
4478 #ifdef CONFIG_DEBUG_SLAB_LEAK
4479 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4480 #endif
4481 	return 0;
4482 }
4483 module_init(slab_proc_init);
4484 #endif
4485 
4486 /**
4487  * ksize - get the actual amount of memory allocated for a given object
4488  * @objp: Pointer to the object
4489  *
4490  * kmalloc may internally round up allocations and return more memory
4491  * than requested. ksize() can be used to determine the actual amount of
4492  * memory allocated. The caller may use this additional memory, even though
4493  * a smaller amount of memory was initially specified with the kmalloc call.
4494  * The caller must guarantee that objp points to a valid object previously
4495  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4496  * must not be freed during the duration of the call.
4497  */
4498 size_t ksize(const void *objp)
4499 {
4500 	BUG_ON(!objp);
4501 	if (unlikely(objp == ZERO_SIZE_PTR))
4502 		return 0;
4503 
4504 	return virt_to_cache(objp)->object_size;
4505 }
4506 EXPORT_SYMBOL(ksize);
4507