xref: /openbmc/linux/mm/slab.c (revision 0317cd52)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists out of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means, that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise from empty slabs or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with enabled local interrupts -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'slab_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<net/sock.h>
121 
122 #include	<asm/cacheflush.h>
123 #include	<asm/tlbflush.h>
124 #include	<asm/page.h>
125 
126 #include <trace/events/kmem.h>
127 
128 #include	"internal.h"
129 
130 #include	"slab.h"
131 
132 /*
133  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
134  *		  0 for faster, smaller code (especially in the critical paths).
135  *
136  * STATS	- 1 to collect stats for /proc/slabinfo.
137  *		  0 for faster, smaller code (especially in the critical paths).
138  *
139  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140  */
141 
142 #ifdef CONFIG_DEBUG_SLAB
143 #define	DEBUG		1
144 #define	STATS		1
145 #define	FORCED_DEBUG	1
146 #else
147 #define	DEBUG		0
148 #define	STATS		0
149 #define	FORCED_DEBUG	0
150 #endif
151 
152 /* Shouldn't this be in a header file somewhere? */
153 #define	BYTES_PER_WORD		sizeof(void *)
154 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
155 
156 #ifndef ARCH_KMALLOC_FLAGS
157 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158 #endif
159 
160 #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162 
163 #if FREELIST_BYTE_INDEX
164 typedef unsigned char freelist_idx_t;
165 #else
166 typedef unsigned short freelist_idx_t;
167 #endif
168 
169 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
170 
171 /*
172  * struct array_cache
173  *
174  * Purpose:
175  * - LIFO ordering, to hand out cache-warm objects from _alloc
176  * - reduce the number of linked list operations
177  * - reduce spinlock operations
178  *
179  * The limit is stored in the per-cpu structure to reduce the data cache
180  * footprint.
181  *
182  */
183 struct array_cache {
184 	unsigned int avail;
185 	unsigned int limit;
186 	unsigned int batchcount;
187 	unsigned int touched;
188 	void *entry[];	/*
189 			 * Must have this definition in here for the proper
190 			 * alignment of array_cache. Also simplifies accessing
191 			 * the entries.
192 			 */
193 };
194 
195 struct alien_cache {
196 	spinlock_t lock;
197 	struct array_cache ac;
198 };
199 
200 /*
201  * Need this for bootstrapping a per node allocator.
202  */
203 #define NUM_INIT_LISTS (2 * MAX_NUMNODES)
204 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
205 #define	CACHE_CACHE 0
206 #define	SIZE_NODE (MAX_NUMNODES)
207 
208 static int drain_freelist(struct kmem_cache *cache,
209 			struct kmem_cache_node *n, int tofree);
210 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
211 			int node, struct list_head *list);
212 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
213 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
214 static void cache_reap(struct work_struct *unused);
215 
216 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
217 						void **list);
218 static inline void fixup_slab_list(struct kmem_cache *cachep,
219 				struct kmem_cache_node *n, struct page *page,
220 				void **list);
221 static int slab_early_init = 1;
222 
223 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
224 
225 static void kmem_cache_node_init(struct kmem_cache_node *parent)
226 {
227 	INIT_LIST_HEAD(&parent->slabs_full);
228 	INIT_LIST_HEAD(&parent->slabs_partial);
229 	INIT_LIST_HEAD(&parent->slabs_free);
230 	parent->shared = NULL;
231 	parent->alien = NULL;
232 	parent->colour_next = 0;
233 	spin_lock_init(&parent->list_lock);
234 	parent->free_objects = 0;
235 	parent->free_touched = 0;
236 }
237 
238 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
239 	do {								\
240 		INIT_LIST_HEAD(listp);					\
241 		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
242 	} while (0)
243 
244 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
245 	do {								\
246 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
247 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
248 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
249 	} while (0)
250 
251 #define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
252 #define CFLGS_OFF_SLAB		(0x80000000UL)
253 #define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
254 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
255 
256 #define BATCHREFILL_LIMIT	16
257 /*
258  * Optimization question: fewer reaps means less probability for unnessary
259  * cpucache drain/refill cycles.
260  *
261  * OTOH the cpuarrays can contain lots of objects,
262  * which could lock up otherwise freeable slabs.
263  */
264 #define REAPTIMEOUT_AC		(2*HZ)
265 #define REAPTIMEOUT_NODE	(4*HZ)
266 
267 #if STATS
268 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
269 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
270 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
271 #define	STATS_INC_GROWN(x)	((x)->grown++)
272 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
273 #define	STATS_SET_HIGH(x)						\
274 	do {								\
275 		if ((x)->num_active > (x)->high_mark)			\
276 			(x)->high_mark = (x)->num_active;		\
277 	} while (0)
278 #define	STATS_INC_ERR(x)	((x)->errors++)
279 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
280 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
281 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
282 #define	STATS_SET_FREEABLE(x, i)					\
283 	do {								\
284 		if ((x)->max_freeable < i)				\
285 			(x)->max_freeable = i;				\
286 	} while (0)
287 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
288 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
289 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
290 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
291 #else
292 #define	STATS_INC_ACTIVE(x)	do { } while (0)
293 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
294 #define	STATS_INC_ALLOCED(x)	do { } while (0)
295 #define	STATS_INC_GROWN(x)	do { } while (0)
296 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
297 #define	STATS_SET_HIGH(x)	do { } while (0)
298 #define	STATS_INC_ERR(x)	do { } while (0)
299 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
300 #define	STATS_INC_NODEFREES(x)	do { } while (0)
301 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
302 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
303 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
304 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
305 #define STATS_INC_FREEHIT(x)	do { } while (0)
306 #define STATS_INC_FREEMISS(x)	do { } while (0)
307 #endif
308 
309 #if DEBUG
310 
311 /*
312  * memory layout of objects:
313  * 0		: objp
314  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
315  * 		the end of an object is aligned with the end of the real
316  * 		allocation. Catches writes behind the end of the allocation.
317  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
318  * 		redzone word.
319  * cachep->obj_offset: The real object.
320  * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
321  * cachep->size - 1* BYTES_PER_WORD: last caller address
322  *					[BYTES_PER_WORD long]
323  */
324 static int obj_offset(struct kmem_cache *cachep)
325 {
326 	return cachep->obj_offset;
327 }
328 
329 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
330 {
331 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
332 	return (unsigned long long*) (objp + obj_offset(cachep) -
333 				      sizeof(unsigned long long));
334 }
335 
336 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
337 {
338 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
339 	if (cachep->flags & SLAB_STORE_USER)
340 		return (unsigned long long *)(objp + cachep->size -
341 					      sizeof(unsigned long long) -
342 					      REDZONE_ALIGN);
343 	return (unsigned long long *) (objp + cachep->size -
344 				       sizeof(unsigned long long));
345 }
346 
347 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
348 {
349 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
350 	return (void **)(objp + cachep->size - BYTES_PER_WORD);
351 }
352 
353 #else
354 
355 #define obj_offset(x)			0
356 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
357 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
358 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
359 
360 #endif
361 
362 #ifdef CONFIG_DEBUG_SLAB_LEAK
363 
364 static inline bool is_store_user_clean(struct kmem_cache *cachep)
365 {
366 	return atomic_read(&cachep->store_user_clean) == 1;
367 }
368 
369 static inline void set_store_user_clean(struct kmem_cache *cachep)
370 {
371 	atomic_set(&cachep->store_user_clean, 1);
372 }
373 
374 static inline void set_store_user_dirty(struct kmem_cache *cachep)
375 {
376 	if (is_store_user_clean(cachep))
377 		atomic_set(&cachep->store_user_clean, 0);
378 }
379 
380 #else
381 static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
382 
383 #endif
384 
385 /*
386  * Do not go above this order unless 0 objects fit into the slab or
387  * overridden on the command line.
388  */
389 #define	SLAB_MAX_ORDER_HI	1
390 #define	SLAB_MAX_ORDER_LO	0
391 static int slab_max_order = SLAB_MAX_ORDER_LO;
392 static bool slab_max_order_set __initdata;
393 
394 static inline struct kmem_cache *virt_to_cache(const void *obj)
395 {
396 	struct page *page = virt_to_head_page(obj);
397 	return page->slab_cache;
398 }
399 
400 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
401 				 unsigned int idx)
402 {
403 	return page->s_mem + cache->size * idx;
404 }
405 
406 /*
407  * We want to avoid an expensive divide : (offset / cache->size)
408  *   Using the fact that size is a constant for a particular cache,
409  *   we can replace (offset / cache->size) by
410  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
411  */
412 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
413 					const struct page *page, void *obj)
414 {
415 	u32 offset = (obj - page->s_mem);
416 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
417 }
418 
419 #define BOOT_CPUCACHE_ENTRIES	1
420 /* internal cache of cache description objs */
421 static struct kmem_cache kmem_cache_boot = {
422 	.batchcount = 1,
423 	.limit = BOOT_CPUCACHE_ENTRIES,
424 	.shared = 1,
425 	.size = sizeof(struct kmem_cache),
426 	.name = "kmem_cache",
427 };
428 
429 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
430 
431 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
432 {
433 	return this_cpu_ptr(cachep->cpu_cache);
434 }
435 
436 /*
437  * Calculate the number of objects and left-over bytes for a given buffer size.
438  */
439 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
440 		unsigned long flags, size_t *left_over)
441 {
442 	unsigned int num;
443 	size_t slab_size = PAGE_SIZE << gfporder;
444 
445 	/*
446 	 * The slab management structure can be either off the slab or
447 	 * on it. For the latter case, the memory allocated for a
448 	 * slab is used for:
449 	 *
450 	 * - @buffer_size bytes for each object
451 	 * - One freelist_idx_t for each object
452 	 *
453 	 * We don't need to consider alignment of freelist because
454 	 * freelist will be at the end of slab page. The objects will be
455 	 * at the correct alignment.
456 	 *
457 	 * If the slab management structure is off the slab, then the
458 	 * alignment will already be calculated into the size. Because
459 	 * the slabs are all pages aligned, the objects will be at the
460 	 * correct alignment when allocated.
461 	 */
462 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
463 		num = slab_size / buffer_size;
464 		*left_over = slab_size % buffer_size;
465 	} else {
466 		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
467 		*left_over = slab_size %
468 			(buffer_size + sizeof(freelist_idx_t));
469 	}
470 
471 	return num;
472 }
473 
474 #if DEBUG
475 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
476 
477 static void __slab_error(const char *function, struct kmem_cache *cachep,
478 			char *msg)
479 {
480 	pr_err("slab error in %s(): cache `%s': %s\n",
481 	       function, cachep->name, msg);
482 	dump_stack();
483 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
484 }
485 #endif
486 
487 /*
488  * By default on NUMA we use alien caches to stage the freeing of
489  * objects allocated from other nodes. This causes massive memory
490  * inefficiencies when using fake NUMA setup to split memory into a
491  * large number of small nodes, so it can be disabled on the command
492  * line
493   */
494 
495 static int use_alien_caches __read_mostly = 1;
496 static int __init noaliencache_setup(char *s)
497 {
498 	use_alien_caches = 0;
499 	return 1;
500 }
501 __setup("noaliencache", noaliencache_setup);
502 
503 static int __init slab_max_order_setup(char *str)
504 {
505 	get_option(&str, &slab_max_order);
506 	slab_max_order = slab_max_order < 0 ? 0 :
507 				min(slab_max_order, MAX_ORDER - 1);
508 	slab_max_order_set = true;
509 
510 	return 1;
511 }
512 __setup("slab_max_order=", slab_max_order_setup);
513 
514 #ifdef CONFIG_NUMA
515 /*
516  * Special reaping functions for NUMA systems called from cache_reap().
517  * These take care of doing round robin flushing of alien caches (containing
518  * objects freed on different nodes from which they were allocated) and the
519  * flushing of remote pcps by calling drain_node_pages.
520  */
521 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
522 
523 static void init_reap_node(int cpu)
524 {
525 	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
526 						    node_online_map);
527 }
528 
529 static void next_reap_node(void)
530 {
531 	int node = __this_cpu_read(slab_reap_node);
532 
533 	node = next_node_in(node, node_online_map);
534 	__this_cpu_write(slab_reap_node, node);
535 }
536 
537 #else
538 #define init_reap_node(cpu) do { } while (0)
539 #define next_reap_node(void) do { } while (0)
540 #endif
541 
542 /*
543  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
544  * via the workqueue/eventd.
545  * Add the CPU number into the expiration time to minimize the possibility of
546  * the CPUs getting into lockstep and contending for the global cache chain
547  * lock.
548  */
549 static void start_cpu_timer(int cpu)
550 {
551 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
552 
553 	/*
554 	 * When this gets called from do_initcalls via cpucache_init(),
555 	 * init_workqueues() has already run, so keventd will be setup
556 	 * at that time.
557 	 */
558 	if (keventd_up() && reap_work->work.func == NULL) {
559 		init_reap_node(cpu);
560 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
561 		schedule_delayed_work_on(cpu, reap_work,
562 					__round_jiffies_relative(HZ, cpu));
563 	}
564 }
565 
566 static void init_arraycache(struct array_cache *ac, int limit, int batch)
567 {
568 	/*
569 	 * The array_cache structures contain pointers to free object.
570 	 * However, when such objects are allocated or transferred to another
571 	 * cache the pointers are not cleared and they could be counted as
572 	 * valid references during a kmemleak scan. Therefore, kmemleak must
573 	 * not scan such objects.
574 	 */
575 	kmemleak_no_scan(ac);
576 	if (ac) {
577 		ac->avail = 0;
578 		ac->limit = limit;
579 		ac->batchcount = batch;
580 		ac->touched = 0;
581 	}
582 }
583 
584 static struct array_cache *alloc_arraycache(int node, int entries,
585 					    int batchcount, gfp_t gfp)
586 {
587 	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
588 	struct array_cache *ac = NULL;
589 
590 	ac = kmalloc_node(memsize, gfp, node);
591 	init_arraycache(ac, entries, batchcount);
592 	return ac;
593 }
594 
595 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
596 					struct page *page, void *objp)
597 {
598 	struct kmem_cache_node *n;
599 	int page_node;
600 	LIST_HEAD(list);
601 
602 	page_node = page_to_nid(page);
603 	n = get_node(cachep, page_node);
604 
605 	spin_lock(&n->list_lock);
606 	free_block(cachep, &objp, 1, page_node, &list);
607 	spin_unlock(&n->list_lock);
608 
609 	slabs_destroy(cachep, &list);
610 }
611 
612 /*
613  * Transfer objects in one arraycache to another.
614  * Locking must be handled by the caller.
615  *
616  * Return the number of entries transferred.
617  */
618 static int transfer_objects(struct array_cache *to,
619 		struct array_cache *from, unsigned int max)
620 {
621 	/* Figure out how many entries to transfer */
622 	int nr = min3(from->avail, max, to->limit - to->avail);
623 
624 	if (!nr)
625 		return 0;
626 
627 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
628 			sizeof(void *) *nr);
629 
630 	from->avail -= nr;
631 	to->avail += nr;
632 	return nr;
633 }
634 
635 #ifndef CONFIG_NUMA
636 
637 #define drain_alien_cache(cachep, alien) do { } while (0)
638 #define reap_alien(cachep, n) do { } while (0)
639 
640 static inline struct alien_cache **alloc_alien_cache(int node,
641 						int limit, gfp_t gfp)
642 {
643 	return NULL;
644 }
645 
646 static inline void free_alien_cache(struct alien_cache **ac_ptr)
647 {
648 }
649 
650 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
651 {
652 	return 0;
653 }
654 
655 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
656 		gfp_t flags)
657 {
658 	return NULL;
659 }
660 
661 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
662 		 gfp_t flags, int nodeid)
663 {
664 	return NULL;
665 }
666 
667 static inline gfp_t gfp_exact_node(gfp_t flags)
668 {
669 	return flags & ~__GFP_NOFAIL;
670 }
671 
672 #else	/* CONFIG_NUMA */
673 
674 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
675 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
676 
677 static struct alien_cache *__alloc_alien_cache(int node, int entries,
678 						int batch, gfp_t gfp)
679 {
680 	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
681 	struct alien_cache *alc = NULL;
682 
683 	alc = kmalloc_node(memsize, gfp, node);
684 	init_arraycache(&alc->ac, entries, batch);
685 	spin_lock_init(&alc->lock);
686 	return alc;
687 }
688 
689 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
690 {
691 	struct alien_cache **alc_ptr;
692 	size_t memsize = sizeof(void *) * nr_node_ids;
693 	int i;
694 
695 	if (limit > 1)
696 		limit = 12;
697 	alc_ptr = kzalloc_node(memsize, gfp, node);
698 	if (!alc_ptr)
699 		return NULL;
700 
701 	for_each_node(i) {
702 		if (i == node || !node_online(i))
703 			continue;
704 		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
705 		if (!alc_ptr[i]) {
706 			for (i--; i >= 0; i--)
707 				kfree(alc_ptr[i]);
708 			kfree(alc_ptr);
709 			return NULL;
710 		}
711 	}
712 	return alc_ptr;
713 }
714 
715 static void free_alien_cache(struct alien_cache **alc_ptr)
716 {
717 	int i;
718 
719 	if (!alc_ptr)
720 		return;
721 	for_each_node(i)
722 	    kfree(alc_ptr[i]);
723 	kfree(alc_ptr);
724 }
725 
726 static void __drain_alien_cache(struct kmem_cache *cachep,
727 				struct array_cache *ac, int node,
728 				struct list_head *list)
729 {
730 	struct kmem_cache_node *n = get_node(cachep, node);
731 
732 	if (ac->avail) {
733 		spin_lock(&n->list_lock);
734 		/*
735 		 * Stuff objects into the remote nodes shared array first.
736 		 * That way we could avoid the overhead of putting the objects
737 		 * into the free lists and getting them back later.
738 		 */
739 		if (n->shared)
740 			transfer_objects(n->shared, ac, ac->limit);
741 
742 		free_block(cachep, ac->entry, ac->avail, node, list);
743 		ac->avail = 0;
744 		spin_unlock(&n->list_lock);
745 	}
746 }
747 
748 /*
749  * Called from cache_reap() to regularly drain alien caches round robin.
750  */
751 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
752 {
753 	int node = __this_cpu_read(slab_reap_node);
754 
755 	if (n->alien) {
756 		struct alien_cache *alc = n->alien[node];
757 		struct array_cache *ac;
758 
759 		if (alc) {
760 			ac = &alc->ac;
761 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
762 				LIST_HEAD(list);
763 
764 				__drain_alien_cache(cachep, ac, node, &list);
765 				spin_unlock_irq(&alc->lock);
766 				slabs_destroy(cachep, &list);
767 			}
768 		}
769 	}
770 }
771 
772 static void drain_alien_cache(struct kmem_cache *cachep,
773 				struct alien_cache **alien)
774 {
775 	int i = 0;
776 	struct alien_cache *alc;
777 	struct array_cache *ac;
778 	unsigned long flags;
779 
780 	for_each_online_node(i) {
781 		alc = alien[i];
782 		if (alc) {
783 			LIST_HEAD(list);
784 
785 			ac = &alc->ac;
786 			spin_lock_irqsave(&alc->lock, flags);
787 			__drain_alien_cache(cachep, ac, i, &list);
788 			spin_unlock_irqrestore(&alc->lock, flags);
789 			slabs_destroy(cachep, &list);
790 		}
791 	}
792 }
793 
794 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
795 				int node, int page_node)
796 {
797 	struct kmem_cache_node *n;
798 	struct alien_cache *alien = NULL;
799 	struct array_cache *ac;
800 	LIST_HEAD(list);
801 
802 	n = get_node(cachep, node);
803 	STATS_INC_NODEFREES(cachep);
804 	if (n->alien && n->alien[page_node]) {
805 		alien = n->alien[page_node];
806 		ac = &alien->ac;
807 		spin_lock(&alien->lock);
808 		if (unlikely(ac->avail == ac->limit)) {
809 			STATS_INC_ACOVERFLOW(cachep);
810 			__drain_alien_cache(cachep, ac, page_node, &list);
811 		}
812 		ac->entry[ac->avail++] = objp;
813 		spin_unlock(&alien->lock);
814 		slabs_destroy(cachep, &list);
815 	} else {
816 		n = get_node(cachep, page_node);
817 		spin_lock(&n->list_lock);
818 		free_block(cachep, &objp, 1, page_node, &list);
819 		spin_unlock(&n->list_lock);
820 		slabs_destroy(cachep, &list);
821 	}
822 	return 1;
823 }
824 
825 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
826 {
827 	int page_node = page_to_nid(virt_to_page(objp));
828 	int node = numa_mem_id();
829 	/*
830 	 * Make sure we are not freeing a object from another node to the array
831 	 * cache on this cpu.
832 	 */
833 	if (likely(node == page_node))
834 		return 0;
835 
836 	return __cache_free_alien(cachep, objp, node, page_node);
837 }
838 
839 /*
840  * Construct gfp mask to allocate from a specific node but do not reclaim or
841  * warn about failures.
842  */
843 static inline gfp_t gfp_exact_node(gfp_t flags)
844 {
845 	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
846 }
847 #endif
848 
849 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
850 {
851 	struct kmem_cache_node *n;
852 
853 	/*
854 	 * Set up the kmem_cache_node for cpu before we can
855 	 * begin anything. Make sure some other cpu on this
856 	 * node has not already allocated this
857 	 */
858 	n = get_node(cachep, node);
859 	if (n) {
860 		spin_lock_irq(&n->list_lock);
861 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
862 				cachep->num;
863 		spin_unlock_irq(&n->list_lock);
864 
865 		return 0;
866 	}
867 
868 	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
869 	if (!n)
870 		return -ENOMEM;
871 
872 	kmem_cache_node_init(n);
873 	n->next_reap = jiffies + REAPTIMEOUT_NODE +
874 		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
875 
876 	n->free_limit =
877 		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
878 
879 	/*
880 	 * The kmem_cache_nodes don't come and go as CPUs
881 	 * come and go.  slab_mutex is sufficient
882 	 * protection here.
883 	 */
884 	cachep->node[node] = n;
885 
886 	return 0;
887 }
888 
889 /*
890  * Allocates and initializes node for a node on each slab cache, used for
891  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
892  * will be allocated off-node since memory is not yet online for the new node.
893  * When hotplugging memory or a cpu, existing node are not replaced if
894  * already in use.
895  *
896  * Must hold slab_mutex.
897  */
898 static int init_cache_node_node(int node)
899 {
900 	int ret;
901 	struct kmem_cache *cachep;
902 
903 	list_for_each_entry(cachep, &slab_caches, list) {
904 		ret = init_cache_node(cachep, node, GFP_KERNEL);
905 		if (ret)
906 			return ret;
907 	}
908 
909 	return 0;
910 }
911 
912 static int setup_kmem_cache_node(struct kmem_cache *cachep,
913 				int node, gfp_t gfp, bool force_change)
914 {
915 	int ret = -ENOMEM;
916 	struct kmem_cache_node *n;
917 	struct array_cache *old_shared = NULL;
918 	struct array_cache *new_shared = NULL;
919 	struct alien_cache **new_alien = NULL;
920 	LIST_HEAD(list);
921 
922 	if (use_alien_caches) {
923 		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
924 		if (!new_alien)
925 			goto fail;
926 	}
927 
928 	if (cachep->shared) {
929 		new_shared = alloc_arraycache(node,
930 			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
931 		if (!new_shared)
932 			goto fail;
933 	}
934 
935 	ret = init_cache_node(cachep, node, gfp);
936 	if (ret)
937 		goto fail;
938 
939 	n = get_node(cachep, node);
940 	spin_lock_irq(&n->list_lock);
941 	if (n->shared && force_change) {
942 		free_block(cachep, n->shared->entry,
943 				n->shared->avail, node, &list);
944 		n->shared->avail = 0;
945 	}
946 
947 	if (!n->shared || force_change) {
948 		old_shared = n->shared;
949 		n->shared = new_shared;
950 		new_shared = NULL;
951 	}
952 
953 	if (!n->alien) {
954 		n->alien = new_alien;
955 		new_alien = NULL;
956 	}
957 
958 	spin_unlock_irq(&n->list_lock);
959 	slabs_destroy(cachep, &list);
960 
961 	/*
962 	 * To protect lockless access to n->shared during irq disabled context.
963 	 * If n->shared isn't NULL in irq disabled context, accessing to it is
964 	 * guaranteed to be valid until irq is re-enabled, because it will be
965 	 * freed after synchronize_sched().
966 	 */
967 	if (force_change)
968 		synchronize_sched();
969 
970 fail:
971 	kfree(old_shared);
972 	kfree(new_shared);
973 	free_alien_cache(new_alien);
974 
975 	return ret;
976 }
977 
978 static void cpuup_canceled(long cpu)
979 {
980 	struct kmem_cache *cachep;
981 	struct kmem_cache_node *n = NULL;
982 	int node = cpu_to_mem(cpu);
983 	const struct cpumask *mask = cpumask_of_node(node);
984 
985 	list_for_each_entry(cachep, &slab_caches, list) {
986 		struct array_cache *nc;
987 		struct array_cache *shared;
988 		struct alien_cache **alien;
989 		LIST_HEAD(list);
990 
991 		n = get_node(cachep, node);
992 		if (!n)
993 			continue;
994 
995 		spin_lock_irq(&n->list_lock);
996 
997 		/* Free limit for this kmem_cache_node */
998 		n->free_limit -= cachep->batchcount;
999 
1000 		/* cpu is dead; no one can alloc from it. */
1001 		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1002 		if (nc) {
1003 			free_block(cachep, nc->entry, nc->avail, node, &list);
1004 			nc->avail = 0;
1005 		}
1006 
1007 		if (!cpumask_empty(mask)) {
1008 			spin_unlock_irq(&n->list_lock);
1009 			goto free_slab;
1010 		}
1011 
1012 		shared = n->shared;
1013 		if (shared) {
1014 			free_block(cachep, shared->entry,
1015 				   shared->avail, node, &list);
1016 			n->shared = NULL;
1017 		}
1018 
1019 		alien = n->alien;
1020 		n->alien = NULL;
1021 
1022 		spin_unlock_irq(&n->list_lock);
1023 
1024 		kfree(shared);
1025 		if (alien) {
1026 			drain_alien_cache(cachep, alien);
1027 			free_alien_cache(alien);
1028 		}
1029 
1030 free_slab:
1031 		slabs_destroy(cachep, &list);
1032 	}
1033 	/*
1034 	 * In the previous loop, all the objects were freed to
1035 	 * the respective cache's slabs,  now we can go ahead and
1036 	 * shrink each nodelist to its limit.
1037 	 */
1038 	list_for_each_entry(cachep, &slab_caches, list) {
1039 		n = get_node(cachep, node);
1040 		if (!n)
1041 			continue;
1042 		drain_freelist(cachep, n, INT_MAX);
1043 	}
1044 }
1045 
1046 static int cpuup_prepare(long cpu)
1047 {
1048 	struct kmem_cache *cachep;
1049 	int node = cpu_to_mem(cpu);
1050 	int err;
1051 
1052 	/*
1053 	 * We need to do this right in the beginning since
1054 	 * alloc_arraycache's are going to use this list.
1055 	 * kmalloc_node allows us to add the slab to the right
1056 	 * kmem_cache_node and not this cpu's kmem_cache_node
1057 	 */
1058 	err = init_cache_node_node(node);
1059 	if (err < 0)
1060 		goto bad;
1061 
1062 	/*
1063 	 * Now we can go ahead with allocating the shared arrays and
1064 	 * array caches
1065 	 */
1066 	list_for_each_entry(cachep, &slab_caches, list) {
1067 		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1068 		if (err)
1069 			goto bad;
1070 	}
1071 
1072 	return 0;
1073 bad:
1074 	cpuup_canceled(cpu);
1075 	return -ENOMEM;
1076 }
1077 
1078 static int cpuup_callback(struct notifier_block *nfb,
1079 				    unsigned long action, void *hcpu)
1080 {
1081 	long cpu = (long)hcpu;
1082 	int err = 0;
1083 
1084 	switch (action) {
1085 	case CPU_UP_PREPARE:
1086 	case CPU_UP_PREPARE_FROZEN:
1087 		mutex_lock(&slab_mutex);
1088 		err = cpuup_prepare(cpu);
1089 		mutex_unlock(&slab_mutex);
1090 		break;
1091 	case CPU_ONLINE:
1092 	case CPU_ONLINE_FROZEN:
1093 		start_cpu_timer(cpu);
1094 		break;
1095 #ifdef CONFIG_HOTPLUG_CPU
1096   	case CPU_DOWN_PREPARE:
1097   	case CPU_DOWN_PREPARE_FROZEN:
1098 		/*
1099 		 * Shutdown cache reaper. Note that the slab_mutex is
1100 		 * held so that if cache_reap() is invoked it cannot do
1101 		 * anything expensive but will only modify reap_work
1102 		 * and reschedule the timer.
1103 		*/
1104 		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1105 		/* Now the cache_reaper is guaranteed to be not running. */
1106 		per_cpu(slab_reap_work, cpu).work.func = NULL;
1107   		break;
1108   	case CPU_DOWN_FAILED:
1109   	case CPU_DOWN_FAILED_FROZEN:
1110 		start_cpu_timer(cpu);
1111   		break;
1112 	case CPU_DEAD:
1113 	case CPU_DEAD_FROZEN:
1114 		/*
1115 		 * Even if all the cpus of a node are down, we don't free the
1116 		 * kmem_cache_node of any cache. This to avoid a race between
1117 		 * cpu_down, and a kmalloc allocation from another cpu for
1118 		 * memory from the node of the cpu going down.  The node
1119 		 * structure is usually allocated from kmem_cache_create() and
1120 		 * gets destroyed at kmem_cache_destroy().
1121 		 */
1122 		/* fall through */
1123 #endif
1124 	case CPU_UP_CANCELED:
1125 	case CPU_UP_CANCELED_FROZEN:
1126 		mutex_lock(&slab_mutex);
1127 		cpuup_canceled(cpu);
1128 		mutex_unlock(&slab_mutex);
1129 		break;
1130 	}
1131 	return notifier_from_errno(err);
1132 }
1133 
1134 static struct notifier_block cpucache_notifier = {
1135 	&cpuup_callback, NULL, 0
1136 };
1137 
1138 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1139 /*
1140  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1141  * Returns -EBUSY if all objects cannot be drained so that the node is not
1142  * removed.
1143  *
1144  * Must hold slab_mutex.
1145  */
1146 static int __meminit drain_cache_node_node(int node)
1147 {
1148 	struct kmem_cache *cachep;
1149 	int ret = 0;
1150 
1151 	list_for_each_entry(cachep, &slab_caches, list) {
1152 		struct kmem_cache_node *n;
1153 
1154 		n = get_node(cachep, node);
1155 		if (!n)
1156 			continue;
1157 
1158 		drain_freelist(cachep, n, INT_MAX);
1159 
1160 		if (!list_empty(&n->slabs_full) ||
1161 		    !list_empty(&n->slabs_partial)) {
1162 			ret = -EBUSY;
1163 			break;
1164 		}
1165 	}
1166 	return ret;
1167 }
1168 
1169 static int __meminit slab_memory_callback(struct notifier_block *self,
1170 					unsigned long action, void *arg)
1171 {
1172 	struct memory_notify *mnb = arg;
1173 	int ret = 0;
1174 	int nid;
1175 
1176 	nid = mnb->status_change_nid;
1177 	if (nid < 0)
1178 		goto out;
1179 
1180 	switch (action) {
1181 	case MEM_GOING_ONLINE:
1182 		mutex_lock(&slab_mutex);
1183 		ret = init_cache_node_node(nid);
1184 		mutex_unlock(&slab_mutex);
1185 		break;
1186 	case MEM_GOING_OFFLINE:
1187 		mutex_lock(&slab_mutex);
1188 		ret = drain_cache_node_node(nid);
1189 		mutex_unlock(&slab_mutex);
1190 		break;
1191 	case MEM_ONLINE:
1192 	case MEM_OFFLINE:
1193 	case MEM_CANCEL_ONLINE:
1194 	case MEM_CANCEL_OFFLINE:
1195 		break;
1196 	}
1197 out:
1198 	return notifier_from_errno(ret);
1199 }
1200 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1201 
1202 /*
1203  * swap the static kmem_cache_node with kmalloced memory
1204  */
1205 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1206 				int nodeid)
1207 {
1208 	struct kmem_cache_node *ptr;
1209 
1210 	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1211 	BUG_ON(!ptr);
1212 
1213 	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1214 	/*
1215 	 * Do not assume that spinlocks can be initialized via memcpy:
1216 	 */
1217 	spin_lock_init(&ptr->list_lock);
1218 
1219 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1220 	cachep->node[nodeid] = ptr;
1221 }
1222 
1223 /*
1224  * For setting up all the kmem_cache_node for cache whose buffer_size is same as
1225  * size of kmem_cache_node.
1226  */
1227 static void __init set_up_node(struct kmem_cache *cachep, int index)
1228 {
1229 	int node;
1230 
1231 	for_each_online_node(node) {
1232 		cachep->node[node] = &init_kmem_cache_node[index + node];
1233 		cachep->node[node]->next_reap = jiffies +
1234 		    REAPTIMEOUT_NODE +
1235 		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1236 	}
1237 }
1238 
1239 /*
1240  * Initialisation.  Called after the page allocator have been initialised and
1241  * before smp_init().
1242  */
1243 void __init kmem_cache_init(void)
1244 {
1245 	int i;
1246 
1247 	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1248 					sizeof(struct rcu_head));
1249 	kmem_cache = &kmem_cache_boot;
1250 
1251 	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1252 		use_alien_caches = 0;
1253 
1254 	for (i = 0; i < NUM_INIT_LISTS; i++)
1255 		kmem_cache_node_init(&init_kmem_cache_node[i]);
1256 
1257 	/*
1258 	 * Fragmentation resistance on low memory - only use bigger
1259 	 * page orders on machines with more than 32MB of memory if
1260 	 * not overridden on the command line.
1261 	 */
1262 	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1263 		slab_max_order = SLAB_MAX_ORDER_HI;
1264 
1265 	/* Bootstrap is tricky, because several objects are allocated
1266 	 * from caches that do not exist yet:
1267 	 * 1) initialize the kmem_cache cache: it contains the struct
1268 	 *    kmem_cache structures of all caches, except kmem_cache itself:
1269 	 *    kmem_cache is statically allocated.
1270 	 *    Initially an __init data area is used for the head array and the
1271 	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1272 	 *    array at the end of the bootstrap.
1273 	 * 2) Create the first kmalloc cache.
1274 	 *    The struct kmem_cache for the new cache is allocated normally.
1275 	 *    An __init data area is used for the head array.
1276 	 * 3) Create the remaining kmalloc caches, with minimally sized
1277 	 *    head arrays.
1278 	 * 4) Replace the __init data head arrays for kmem_cache and the first
1279 	 *    kmalloc cache with kmalloc allocated arrays.
1280 	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1281 	 *    the other cache's with kmalloc allocated memory.
1282 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1283 	 */
1284 
1285 	/* 1) create the kmem_cache */
1286 
1287 	/*
1288 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1289 	 */
1290 	create_boot_cache(kmem_cache, "kmem_cache",
1291 		offsetof(struct kmem_cache, node) +
1292 				  nr_node_ids * sizeof(struct kmem_cache_node *),
1293 				  SLAB_HWCACHE_ALIGN);
1294 	list_add(&kmem_cache->list, &slab_caches);
1295 	slab_state = PARTIAL;
1296 
1297 	/*
1298 	 * Initialize the caches that provide memory for the  kmem_cache_node
1299 	 * structures first.  Without this, further allocations will bug.
1300 	 */
1301 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
1302 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1303 	slab_state = PARTIAL_NODE;
1304 	setup_kmalloc_cache_index_table();
1305 
1306 	slab_early_init = 0;
1307 
1308 	/* 5) Replace the bootstrap kmem_cache_node */
1309 	{
1310 		int nid;
1311 
1312 		for_each_online_node(nid) {
1313 			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1314 
1315 			init_list(kmalloc_caches[INDEX_NODE],
1316 					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1317 		}
1318 	}
1319 
1320 	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1321 }
1322 
1323 void __init kmem_cache_init_late(void)
1324 {
1325 	struct kmem_cache *cachep;
1326 
1327 	slab_state = UP;
1328 
1329 	/* 6) resize the head arrays to their final sizes */
1330 	mutex_lock(&slab_mutex);
1331 	list_for_each_entry(cachep, &slab_caches, list)
1332 		if (enable_cpucache(cachep, GFP_NOWAIT))
1333 			BUG();
1334 	mutex_unlock(&slab_mutex);
1335 
1336 	/* Done! */
1337 	slab_state = FULL;
1338 
1339 	/*
1340 	 * Register a cpu startup notifier callback that initializes
1341 	 * cpu_cache_get for all new cpus
1342 	 */
1343 	register_cpu_notifier(&cpucache_notifier);
1344 
1345 #ifdef CONFIG_NUMA
1346 	/*
1347 	 * Register a memory hotplug callback that initializes and frees
1348 	 * node.
1349 	 */
1350 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1351 #endif
1352 
1353 	/*
1354 	 * The reap timers are started later, with a module init call: That part
1355 	 * of the kernel is not yet operational.
1356 	 */
1357 }
1358 
1359 static int __init cpucache_init(void)
1360 {
1361 	int cpu;
1362 
1363 	/*
1364 	 * Register the timers that return unneeded pages to the page allocator
1365 	 */
1366 	for_each_online_cpu(cpu)
1367 		start_cpu_timer(cpu);
1368 
1369 	/* Done! */
1370 	slab_state = FULL;
1371 	return 0;
1372 }
1373 __initcall(cpucache_init);
1374 
1375 static noinline void
1376 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1377 {
1378 #if DEBUG
1379 	struct kmem_cache_node *n;
1380 	struct page *page;
1381 	unsigned long flags;
1382 	int node;
1383 	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1384 				      DEFAULT_RATELIMIT_BURST);
1385 
1386 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1387 		return;
1388 
1389 	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1390 		nodeid, gfpflags, &gfpflags);
1391 	pr_warn("  cache: %s, object size: %d, order: %d\n",
1392 		cachep->name, cachep->size, cachep->gfporder);
1393 
1394 	for_each_kmem_cache_node(cachep, node, n) {
1395 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1396 		unsigned long active_slabs = 0, num_slabs = 0;
1397 
1398 		spin_lock_irqsave(&n->list_lock, flags);
1399 		list_for_each_entry(page, &n->slabs_full, lru) {
1400 			active_objs += cachep->num;
1401 			active_slabs++;
1402 		}
1403 		list_for_each_entry(page, &n->slabs_partial, lru) {
1404 			active_objs += page->active;
1405 			active_slabs++;
1406 		}
1407 		list_for_each_entry(page, &n->slabs_free, lru)
1408 			num_slabs++;
1409 
1410 		free_objects += n->free_objects;
1411 		spin_unlock_irqrestore(&n->list_lock, flags);
1412 
1413 		num_slabs += active_slabs;
1414 		num_objs = num_slabs * cachep->num;
1415 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1416 			node, active_slabs, num_slabs, active_objs, num_objs,
1417 			free_objects);
1418 	}
1419 #endif
1420 }
1421 
1422 /*
1423  * Interface to system's page allocator. No need to hold the
1424  * kmem_cache_node ->list_lock.
1425  *
1426  * If we requested dmaable memory, we will get it. Even if we
1427  * did not request dmaable memory, we might get it, but that
1428  * would be relatively rare and ignorable.
1429  */
1430 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1431 								int nodeid)
1432 {
1433 	struct page *page;
1434 	int nr_pages;
1435 
1436 	flags |= cachep->allocflags;
1437 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1438 		flags |= __GFP_RECLAIMABLE;
1439 
1440 	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1441 	if (!page) {
1442 		slab_out_of_memory(cachep, flags, nodeid);
1443 		return NULL;
1444 	}
1445 
1446 	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1447 		__free_pages(page, cachep->gfporder);
1448 		return NULL;
1449 	}
1450 
1451 	nr_pages = (1 << cachep->gfporder);
1452 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1453 		add_zone_page_state(page_zone(page),
1454 			NR_SLAB_RECLAIMABLE, nr_pages);
1455 	else
1456 		add_zone_page_state(page_zone(page),
1457 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1458 
1459 	__SetPageSlab(page);
1460 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1461 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1462 		SetPageSlabPfmemalloc(page);
1463 
1464 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1465 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1466 
1467 		if (cachep->ctor)
1468 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1469 		else
1470 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1471 	}
1472 
1473 	return page;
1474 }
1475 
1476 /*
1477  * Interface to system's page release.
1478  */
1479 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1480 {
1481 	int order = cachep->gfporder;
1482 	unsigned long nr_freed = (1 << order);
1483 
1484 	kmemcheck_free_shadow(page, order);
1485 
1486 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1487 		sub_zone_page_state(page_zone(page),
1488 				NR_SLAB_RECLAIMABLE, nr_freed);
1489 	else
1490 		sub_zone_page_state(page_zone(page),
1491 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1492 
1493 	BUG_ON(!PageSlab(page));
1494 	__ClearPageSlabPfmemalloc(page);
1495 	__ClearPageSlab(page);
1496 	page_mapcount_reset(page);
1497 	page->mapping = NULL;
1498 
1499 	if (current->reclaim_state)
1500 		current->reclaim_state->reclaimed_slab += nr_freed;
1501 	memcg_uncharge_slab(page, order, cachep);
1502 	__free_pages(page, order);
1503 }
1504 
1505 static void kmem_rcu_free(struct rcu_head *head)
1506 {
1507 	struct kmem_cache *cachep;
1508 	struct page *page;
1509 
1510 	page = container_of(head, struct page, rcu_head);
1511 	cachep = page->slab_cache;
1512 
1513 	kmem_freepages(cachep, page);
1514 }
1515 
1516 #if DEBUG
1517 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1518 {
1519 	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1520 		(cachep->size % PAGE_SIZE) == 0)
1521 		return true;
1522 
1523 	return false;
1524 }
1525 
1526 #ifdef CONFIG_DEBUG_PAGEALLOC
1527 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1528 			    unsigned long caller)
1529 {
1530 	int size = cachep->object_size;
1531 
1532 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1533 
1534 	if (size < 5 * sizeof(unsigned long))
1535 		return;
1536 
1537 	*addr++ = 0x12345678;
1538 	*addr++ = caller;
1539 	*addr++ = smp_processor_id();
1540 	size -= 3 * sizeof(unsigned long);
1541 	{
1542 		unsigned long *sptr = &caller;
1543 		unsigned long svalue;
1544 
1545 		while (!kstack_end(sptr)) {
1546 			svalue = *sptr++;
1547 			if (kernel_text_address(svalue)) {
1548 				*addr++ = svalue;
1549 				size -= sizeof(unsigned long);
1550 				if (size <= sizeof(unsigned long))
1551 					break;
1552 			}
1553 		}
1554 
1555 	}
1556 	*addr++ = 0x87654321;
1557 }
1558 
1559 static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1560 				int map, unsigned long caller)
1561 {
1562 	if (!is_debug_pagealloc_cache(cachep))
1563 		return;
1564 
1565 	if (caller)
1566 		store_stackinfo(cachep, objp, caller);
1567 
1568 	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1569 }
1570 
1571 #else
1572 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1573 				int map, unsigned long caller) {}
1574 
1575 #endif
1576 
1577 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1578 {
1579 	int size = cachep->object_size;
1580 	addr = &((char *)addr)[obj_offset(cachep)];
1581 
1582 	memset(addr, val, size);
1583 	*(unsigned char *)(addr + size - 1) = POISON_END;
1584 }
1585 
1586 static void dump_line(char *data, int offset, int limit)
1587 {
1588 	int i;
1589 	unsigned char error = 0;
1590 	int bad_count = 0;
1591 
1592 	pr_err("%03x: ", offset);
1593 	for (i = 0; i < limit; i++) {
1594 		if (data[offset + i] != POISON_FREE) {
1595 			error = data[offset + i];
1596 			bad_count++;
1597 		}
1598 	}
1599 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1600 			&data[offset], limit, 1);
1601 
1602 	if (bad_count == 1) {
1603 		error ^= POISON_FREE;
1604 		if (!(error & (error - 1))) {
1605 			pr_err("Single bit error detected. Probably bad RAM.\n");
1606 #ifdef CONFIG_X86
1607 			pr_err("Run memtest86+ or a similar memory test tool.\n");
1608 #else
1609 			pr_err("Run a memory test tool.\n");
1610 #endif
1611 		}
1612 	}
1613 }
1614 #endif
1615 
1616 #if DEBUG
1617 
1618 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1619 {
1620 	int i, size;
1621 	char *realobj;
1622 
1623 	if (cachep->flags & SLAB_RED_ZONE) {
1624 		pr_err("Redzone: 0x%llx/0x%llx\n",
1625 		       *dbg_redzone1(cachep, objp),
1626 		       *dbg_redzone2(cachep, objp));
1627 	}
1628 
1629 	if (cachep->flags & SLAB_STORE_USER) {
1630 		pr_err("Last user: [<%p>](%pSR)\n",
1631 		       *dbg_userword(cachep, objp),
1632 		       *dbg_userword(cachep, objp));
1633 	}
1634 	realobj = (char *)objp + obj_offset(cachep);
1635 	size = cachep->object_size;
1636 	for (i = 0; i < size && lines; i += 16, lines--) {
1637 		int limit;
1638 		limit = 16;
1639 		if (i + limit > size)
1640 			limit = size - i;
1641 		dump_line(realobj, i, limit);
1642 	}
1643 }
1644 
1645 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1646 {
1647 	char *realobj;
1648 	int size, i;
1649 	int lines = 0;
1650 
1651 	if (is_debug_pagealloc_cache(cachep))
1652 		return;
1653 
1654 	realobj = (char *)objp + obj_offset(cachep);
1655 	size = cachep->object_size;
1656 
1657 	for (i = 0; i < size; i++) {
1658 		char exp = POISON_FREE;
1659 		if (i == size - 1)
1660 			exp = POISON_END;
1661 		if (realobj[i] != exp) {
1662 			int limit;
1663 			/* Mismatch ! */
1664 			/* Print header */
1665 			if (lines == 0) {
1666 				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
1667 				       print_tainted(), cachep->name,
1668 				       realobj, size);
1669 				print_objinfo(cachep, objp, 0);
1670 			}
1671 			/* Hexdump the affected line */
1672 			i = (i / 16) * 16;
1673 			limit = 16;
1674 			if (i + limit > size)
1675 				limit = size - i;
1676 			dump_line(realobj, i, limit);
1677 			i += 16;
1678 			lines++;
1679 			/* Limit to 5 lines */
1680 			if (lines > 5)
1681 				break;
1682 		}
1683 	}
1684 	if (lines != 0) {
1685 		/* Print some data about the neighboring objects, if they
1686 		 * exist:
1687 		 */
1688 		struct page *page = virt_to_head_page(objp);
1689 		unsigned int objnr;
1690 
1691 		objnr = obj_to_index(cachep, page, objp);
1692 		if (objnr) {
1693 			objp = index_to_obj(cachep, page, objnr - 1);
1694 			realobj = (char *)objp + obj_offset(cachep);
1695 			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
1696 			print_objinfo(cachep, objp, 2);
1697 		}
1698 		if (objnr + 1 < cachep->num) {
1699 			objp = index_to_obj(cachep, page, objnr + 1);
1700 			realobj = (char *)objp + obj_offset(cachep);
1701 			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
1702 			print_objinfo(cachep, objp, 2);
1703 		}
1704 	}
1705 }
1706 #endif
1707 
1708 #if DEBUG
1709 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1710 						struct page *page)
1711 {
1712 	int i;
1713 
1714 	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1715 		poison_obj(cachep, page->freelist - obj_offset(cachep),
1716 			POISON_FREE);
1717 	}
1718 
1719 	for (i = 0; i < cachep->num; i++) {
1720 		void *objp = index_to_obj(cachep, page, i);
1721 
1722 		if (cachep->flags & SLAB_POISON) {
1723 			check_poison_obj(cachep, objp);
1724 			slab_kernel_map(cachep, objp, 1, 0);
1725 		}
1726 		if (cachep->flags & SLAB_RED_ZONE) {
1727 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1728 				slab_error(cachep, "start of a freed object was overwritten");
1729 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1730 				slab_error(cachep, "end of a freed object was overwritten");
1731 		}
1732 	}
1733 }
1734 #else
1735 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1736 						struct page *page)
1737 {
1738 }
1739 #endif
1740 
1741 /**
1742  * slab_destroy - destroy and release all objects in a slab
1743  * @cachep: cache pointer being destroyed
1744  * @page: page pointer being destroyed
1745  *
1746  * Destroy all the objs in a slab page, and release the mem back to the system.
1747  * Before calling the slab page must have been unlinked from the cache. The
1748  * kmem_cache_node ->list_lock is not held/needed.
1749  */
1750 static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1751 {
1752 	void *freelist;
1753 
1754 	freelist = page->freelist;
1755 	slab_destroy_debugcheck(cachep, page);
1756 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1757 		call_rcu(&page->rcu_head, kmem_rcu_free);
1758 	else
1759 		kmem_freepages(cachep, page);
1760 
1761 	/*
1762 	 * From now on, we don't use freelist
1763 	 * although actual page can be freed in rcu context
1764 	 */
1765 	if (OFF_SLAB(cachep))
1766 		kmem_cache_free(cachep->freelist_cache, freelist);
1767 }
1768 
1769 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1770 {
1771 	struct page *page, *n;
1772 
1773 	list_for_each_entry_safe(page, n, list, lru) {
1774 		list_del(&page->lru);
1775 		slab_destroy(cachep, page);
1776 	}
1777 }
1778 
1779 /**
1780  * calculate_slab_order - calculate size (page order) of slabs
1781  * @cachep: pointer to the cache that is being created
1782  * @size: size of objects to be created in this cache.
1783  * @flags: slab allocation flags
1784  *
1785  * Also calculates the number of objects per slab.
1786  *
1787  * This could be made much more intelligent.  For now, try to avoid using
1788  * high order pages for slabs.  When the gfp() functions are more friendly
1789  * towards high-order requests, this should be changed.
1790  */
1791 static size_t calculate_slab_order(struct kmem_cache *cachep,
1792 				size_t size, unsigned long flags)
1793 {
1794 	size_t left_over = 0;
1795 	int gfporder;
1796 
1797 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1798 		unsigned int num;
1799 		size_t remainder;
1800 
1801 		num = cache_estimate(gfporder, size, flags, &remainder);
1802 		if (!num)
1803 			continue;
1804 
1805 		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1806 		if (num > SLAB_OBJ_MAX_NUM)
1807 			break;
1808 
1809 		if (flags & CFLGS_OFF_SLAB) {
1810 			struct kmem_cache *freelist_cache;
1811 			size_t freelist_size;
1812 
1813 			freelist_size = num * sizeof(freelist_idx_t);
1814 			freelist_cache = kmalloc_slab(freelist_size, 0u);
1815 			if (!freelist_cache)
1816 				continue;
1817 
1818 			/*
1819 			 * Needed to avoid possible looping condition
1820 			 * in cache_grow_begin()
1821 			 */
1822 			if (OFF_SLAB(freelist_cache))
1823 				continue;
1824 
1825 			/* check if off slab has enough benefit */
1826 			if (freelist_cache->size > cachep->size / 2)
1827 				continue;
1828 		}
1829 
1830 		/* Found something acceptable - save it away */
1831 		cachep->num = num;
1832 		cachep->gfporder = gfporder;
1833 		left_over = remainder;
1834 
1835 		/*
1836 		 * A VFS-reclaimable slab tends to have most allocations
1837 		 * as GFP_NOFS and we really don't want to have to be allocating
1838 		 * higher-order pages when we are unable to shrink dcache.
1839 		 */
1840 		if (flags & SLAB_RECLAIM_ACCOUNT)
1841 			break;
1842 
1843 		/*
1844 		 * Large number of objects is good, but very large slabs are
1845 		 * currently bad for the gfp()s.
1846 		 */
1847 		if (gfporder >= slab_max_order)
1848 			break;
1849 
1850 		/*
1851 		 * Acceptable internal fragmentation?
1852 		 */
1853 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1854 			break;
1855 	}
1856 	return left_over;
1857 }
1858 
1859 static struct array_cache __percpu *alloc_kmem_cache_cpus(
1860 		struct kmem_cache *cachep, int entries, int batchcount)
1861 {
1862 	int cpu;
1863 	size_t size;
1864 	struct array_cache __percpu *cpu_cache;
1865 
1866 	size = sizeof(void *) * entries + sizeof(struct array_cache);
1867 	cpu_cache = __alloc_percpu(size, sizeof(void *));
1868 
1869 	if (!cpu_cache)
1870 		return NULL;
1871 
1872 	for_each_possible_cpu(cpu) {
1873 		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1874 				entries, batchcount);
1875 	}
1876 
1877 	return cpu_cache;
1878 }
1879 
1880 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1881 {
1882 	if (slab_state >= FULL)
1883 		return enable_cpucache(cachep, gfp);
1884 
1885 	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1886 	if (!cachep->cpu_cache)
1887 		return 1;
1888 
1889 	if (slab_state == DOWN) {
1890 		/* Creation of first cache (kmem_cache). */
1891 		set_up_node(kmem_cache, CACHE_CACHE);
1892 	} else if (slab_state == PARTIAL) {
1893 		/* For kmem_cache_node */
1894 		set_up_node(cachep, SIZE_NODE);
1895 	} else {
1896 		int node;
1897 
1898 		for_each_online_node(node) {
1899 			cachep->node[node] = kmalloc_node(
1900 				sizeof(struct kmem_cache_node), gfp, node);
1901 			BUG_ON(!cachep->node[node]);
1902 			kmem_cache_node_init(cachep->node[node]);
1903 		}
1904 	}
1905 
1906 	cachep->node[numa_mem_id()]->next_reap =
1907 			jiffies + REAPTIMEOUT_NODE +
1908 			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1909 
1910 	cpu_cache_get(cachep)->avail = 0;
1911 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1912 	cpu_cache_get(cachep)->batchcount = 1;
1913 	cpu_cache_get(cachep)->touched = 0;
1914 	cachep->batchcount = 1;
1915 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1916 	return 0;
1917 }
1918 
1919 unsigned long kmem_cache_flags(unsigned long object_size,
1920 	unsigned long flags, const char *name,
1921 	void (*ctor)(void *))
1922 {
1923 	return flags;
1924 }
1925 
1926 struct kmem_cache *
1927 __kmem_cache_alias(const char *name, size_t size, size_t align,
1928 		   unsigned long flags, void (*ctor)(void *))
1929 {
1930 	struct kmem_cache *cachep;
1931 
1932 	cachep = find_mergeable(size, align, flags, name, ctor);
1933 	if (cachep) {
1934 		cachep->refcount++;
1935 
1936 		/*
1937 		 * Adjust the object sizes so that we clear
1938 		 * the complete object on kzalloc.
1939 		 */
1940 		cachep->object_size = max_t(int, cachep->object_size, size);
1941 	}
1942 	return cachep;
1943 }
1944 
1945 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1946 			size_t size, unsigned long flags)
1947 {
1948 	size_t left;
1949 
1950 	cachep->num = 0;
1951 
1952 	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
1953 		return false;
1954 
1955 	left = calculate_slab_order(cachep, size,
1956 			flags | CFLGS_OBJFREELIST_SLAB);
1957 	if (!cachep->num)
1958 		return false;
1959 
1960 	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1961 		return false;
1962 
1963 	cachep->colour = left / cachep->colour_off;
1964 
1965 	return true;
1966 }
1967 
1968 static bool set_off_slab_cache(struct kmem_cache *cachep,
1969 			size_t size, unsigned long flags)
1970 {
1971 	size_t left;
1972 
1973 	cachep->num = 0;
1974 
1975 	/*
1976 	 * Always use on-slab management when SLAB_NOLEAKTRACE
1977 	 * to avoid recursive calls into kmemleak.
1978 	 */
1979 	if (flags & SLAB_NOLEAKTRACE)
1980 		return false;
1981 
1982 	/*
1983 	 * Size is large, assume best to place the slab management obj
1984 	 * off-slab (should allow better packing of objs).
1985 	 */
1986 	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1987 	if (!cachep->num)
1988 		return false;
1989 
1990 	/*
1991 	 * If the slab has been placed off-slab, and we have enough space then
1992 	 * move it on-slab. This is at the expense of any extra colouring.
1993 	 */
1994 	if (left >= cachep->num * sizeof(freelist_idx_t))
1995 		return false;
1996 
1997 	cachep->colour = left / cachep->colour_off;
1998 
1999 	return true;
2000 }
2001 
2002 static bool set_on_slab_cache(struct kmem_cache *cachep,
2003 			size_t size, unsigned long flags)
2004 {
2005 	size_t left;
2006 
2007 	cachep->num = 0;
2008 
2009 	left = calculate_slab_order(cachep, size, flags);
2010 	if (!cachep->num)
2011 		return false;
2012 
2013 	cachep->colour = left / cachep->colour_off;
2014 
2015 	return true;
2016 }
2017 
2018 /**
2019  * __kmem_cache_create - Create a cache.
2020  * @cachep: cache management descriptor
2021  * @flags: SLAB flags
2022  *
2023  * Returns a ptr to the cache on success, NULL on failure.
2024  * Cannot be called within a int, but can be interrupted.
2025  * The @ctor is run when new pages are allocated by the cache.
2026  *
2027  * The flags are
2028  *
2029  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2030  * to catch references to uninitialised memory.
2031  *
2032  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2033  * for buffer overruns.
2034  *
2035  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2036  * cacheline.  This can be beneficial if you're counting cycles as closely
2037  * as davem.
2038  */
2039 int
2040 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2041 {
2042 	size_t ralign = BYTES_PER_WORD;
2043 	gfp_t gfp;
2044 	int err;
2045 	size_t size = cachep->size;
2046 
2047 #if DEBUG
2048 #if FORCED_DEBUG
2049 	/*
2050 	 * Enable redzoning and last user accounting, except for caches with
2051 	 * large objects, if the increased size would increase the object size
2052 	 * above the next power of two: caches with object sizes just above a
2053 	 * power of two have a significant amount of internal fragmentation.
2054 	 */
2055 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2056 						2 * sizeof(unsigned long long)))
2057 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2058 	if (!(flags & SLAB_DESTROY_BY_RCU))
2059 		flags |= SLAB_POISON;
2060 #endif
2061 #endif
2062 
2063 	/*
2064 	 * Check that size is in terms of words.  This is needed to avoid
2065 	 * unaligned accesses for some archs when redzoning is used, and makes
2066 	 * sure any on-slab bufctl's are also correctly aligned.
2067 	 */
2068 	if (size & (BYTES_PER_WORD - 1)) {
2069 		size += (BYTES_PER_WORD - 1);
2070 		size &= ~(BYTES_PER_WORD - 1);
2071 	}
2072 
2073 	if (flags & SLAB_RED_ZONE) {
2074 		ralign = REDZONE_ALIGN;
2075 		/* If redzoning, ensure that the second redzone is suitably
2076 		 * aligned, by adjusting the object size accordingly. */
2077 		size += REDZONE_ALIGN - 1;
2078 		size &= ~(REDZONE_ALIGN - 1);
2079 	}
2080 
2081 	/* 3) caller mandated alignment */
2082 	if (ralign < cachep->align) {
2083 		ralign = cachep->align;
2084 	}
2085 	/* disable debug if necessary */
2086 	if (ralign > __alignof__(unsigned long long))
2087 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2088 	/*
2089 	 * 4) Store it.
2090 	 */
2091 	cachep->align = ralign;
2092 	cachep->colour_off = cache_line_size();
2093 	/* Offset must be a multiple of the alignment. */
2094 	if (cachep->colour_off < cachep->align)
2095 		cachep->colour_off = cachep->align;
2096 
2097 	if (slab_is_available())
2098 		gfp = GFP_KERNEL;
2099 	else
2100 		gfp = GFP_NOWAIT;
2101 
2102 #if DEBUG
2103 
2104 	/*
2105 	 * Both debugging options require word-alignment which is calculated
2106 	 * into align above.
2107 	 */
2108 	if (flags & SLAB_RED_ZONE) {
2109 		/* add space for red zone words */
2110 		cachep->obj_offset += sizeof(unsigned long long);
2111 		size += 2 * sizeof(unsigned long long);
2112 	}
2113 	if (flags & SLAB_STORE_USER) {
2114 		/* user store requires one word storage behind the end of
2115 		 * the real object. But if the second red zone needs to be
2116 		 * aligned to 64 bits, we must allow that much space.
2117 		 */
2118 		if (flags & SLAB_RED_ZONE)
2119 			size += REDZONE_ALIGN;
2120 		else
2121 			size += BYTES_PER_WORD;
2122 	}
2123 #endif
2124 
2125 	kasan_cache_create(cachep, &size, &flags);
2126 
2127 	size = ALIGN(size, cachep->align);
2128 	/*
2129 	 * We should restrict the number of objects in a slab to implement
2130 	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
2131 	 */
2132 	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2133 		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2134 
2135 #if DEBUG
2136 	/*
2137 	 * To activate debug pagealloc, off-slab management is necessary
2138 	 * requirement. In early phase of initialization, small sized slab
2139 	 * doesn't get initialized so it would not be possible. So, we need
2140 	 * to check size >= 256. It guarantees that all necessary small
2141 	 * sized slab is initialized in current slab initialization sequence.
2142 	 */
2143 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2144 		size >= 256 && cachep->object_size > cache_line_size()) {
2145 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2146 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2147 
2148 			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2149 				flags |= CFLGS_OFF_SLAB;
2150 				cachep->obj_offset += tmp_size - size;
2151 				size = tmp_size;
2152 				goto done;
2153 			}
2154 		}
2155 	}
2156 #endif
2157 
2158 	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2159 		flags |= CFLGS_OBJFREELIST_SLAB;
2160 		goto done;
2161 	}
2162 
2163 	if (set_off_slab_cache(cachep, size, flags)) {
2164 		flags |= CFLGS_OFF_SLAB;
2165 		goto done;
2166 	}
2167 
2168 	if (set_on_slab_cache(cachep, size, flags))
2169 		goto done;
2170 
2171 	return -E2BIG;
2172 
2173 done:
2174 	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2175 	cachep->flags = flags;
2176 	cachep->allocflags = __GFP_COMP;
2177 	if (flags & SLAB_CACHE_DMA)
2178 		cachep->allocflags |= GFP_DMA;
2179 	cachep->size = size;
2180 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2181 
2182 #if DEBUG
2183 	/*
2184 	 * If we're going to use the generic kernel_map_pages()
2185 	 * poisoning, then it's going to smash the contents of
2186 	 * the redzone and userword anyhow, so switch them off.
2187 	 */
2188 	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2189 		(cachep->flags & SLAB_POISON) &&
2190 		is_debug_pagealloc_cache(cachep))
2191 		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2192 #endif
2193 
2194 	if (OFF_SLAB(cachep)) {
2195 		cachep->freelist_cache =
2196 			kmalloc_slab(cachep->freelist_size, 0u);
2197 	}
2198 
2199 	err = setup_cpu_cache(cachep, gfp);
2200 	if (err) {
2201 		__kmem_cache_release(cachep);
2202 		return err;
2203 	}
2204 
2205 	return 0;
2206 }
2207 
2208 #if DEBUG
2209 static void check_irq_off(void)
2210 {
2211 	BUG_ON(!irqs_disabled());
2212 }
2213 
2214 static void check_irq_on(void)
2215 {
2216 	BUG_ON(irqs_disabled());
2217 }
2218 
2219 static void check_mutex_acquired(void)
2220 {
2221 	BUG_ON(!mutex_is_locked(&slab_mutex));
2222 }
2223 
2224 static void check_spinlock_acquired(struct kmem_cache *cachep)
2225 {
2226 #ifdef CONFIG_SMP
2227 	check_irq_off();
2228 	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2229 #endif
2230 }
2231 
2232 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2233 {
2234 #ifdef CONFIG_SMP
2235 	check_irq_off();
2236 	assert_spin_locked(&get_node(cachep, node)->list_lock);
2237 #endif
2238 }
2239 
2240 #else
2241 #define check_irq_off()	do { } while(0)
2242 #define check_irq_on()	do { } while(0)
2243 #define check_mutex_acquired()	do { } while(0)
2244 #define check_spinlock_acquired(x) do { } while(0)
2245 #define check_spinlock_acquired_node(x, y) do { } while(0)
2246 #endif
2247 
2248 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2249 				int node, bool free_all, struct list_head *list)
2250 {
2251 	int tofree;
2252 
2253 	if (!ac || !ac->avail)
2254 		return;
2255 
2256 	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2257 	if (tofree > ac->avail)
2258 		tofree = (ac->avail + 1) / 2;
2259 
2260 	free_block(cachep, ac->entry, tofree, node, list);
2261 	ac->avail -= tofree;
2262 	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2263 }
2264 
2265 static void do_drain(void *arg)
2266 {
2267 	struct kmem_cache *cachep = arg;
2268 	struct array_cache *ac;
2269 	int node = numa_mem_id();
2270 	struct kmem_cache_node *n;
2271 	LIST_HEAD(list);
2272 
2273 	check_irq_off();
2274 	ac = cpu_cache_get(cachep);
2275 	n = get_node(cachep, node);
2276 	spin_lock(&n->list_lock);
2277 	free_block(cachep, ac->entry, ac->avail, node, &list);
2278 	spin_unlock(&n->list_lock);
2279 	slabs_destroy(cachep, &list);
2280 	ac->avail = 0;
2281 }
2282 
2283 static void drain_cpu_caches(struct kmem_cache *cachep)
2284 {
2285 	struct kmem_cache_node *n;
2286 	int node;
2287 	LIST_HEAD(list);
2288 
2289 	on_each_cpu(do_drain, cachep, 1);
2290 	check_irq_on();
2291 	for_each_kmem_cache_node(cachep, node, n)
2292 		if (n->alien)
2293 			drain_alien_cache(cachep, n->alien);
2294 
2295 	for_each_kmem_cache_node(cachep, node, n) {
2296 		spin_lock_irq(&n->list_lock);
2297 		drain_array_locked(cachep, n->shared, node, true, &list);
2298 		spin_unlock_irq(&n->list_lock);
2299 
2300 		slabs_destroy(cachep, &list);
2301 	}
2302 }
2303 
2304 /*
2305  * Remove slabs from the list of free slabs.
2306  * Specify the number of slabs to drain in tofree.
2307  *
2308  * Returns the actual number of slabs released.
2309  */
2310 static int drain_freelist(struct kmem_cache *cache,
2311 			struct kmem_cache_node *n, int tofree)
2312 {
2313 	struct list_head *p;
2314 	int nr_freed;
2315 	struct page *page;
2316 
2317 	nr_freed = 0;
2318 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2319 
2320 		spin_lock_irq(&n->list_lock);
2321 		p = n->slabs_free.prev;
2322 		if (p == &n->slabs_free) {
2323 			spin_unlock_irq(&n->list_lock);
2324 			goto out;
2325 		}
2326 
2327 		page = list_entry(p, struct page, lru);
2328 		list_del(&page->lru);
2329 		/*
2330 		 * Safe to drop the lock. The slab is no longer linked
2331 		 * to the cache.
2332 		 */
2333 		n->free_objects -= cache->num;
2334 		spin_unlock_irq(&n->list_lock);
2335 		slab_destroy(cache, page);
2336 		nr_freed++;
2337 	}
2338 out:
2339 	return nr_freed;
2340 }
2341 
2342 int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2343 {
2344 	int ret = 0;
2345 	int node;
2346 	struct kmem_cache_node *n;
2347 
2348 	drain_cpu_caches(cachep);
2349 
2350 	check_irq_on();
2351 	for_each_kmem_cache_node(cachep, node, n) {
2352 		drain_freelist(cachep, n, INT_MAX);
2353 
2354 		ret += !list_empty(&n->slabs_full) ||
2355 			!list_empty(&n->slabs_partial);
2356 	}
2357 	return (ret ? 1 : 0);
2358 }
2359 
2360 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2361 {
2362 	return __kmem_cache_shrink(cachep, false);
2363 }
2364 
2365 void __kmem_cache_release(struct kmem_cache *cachep)
2366 {
2367 	int i;
2368 	struct kmem_cache_node *n;
2369 
2370 	cache_random_seq_destroy(cachep);
2371 
2372 	free_percpu(cachep->cpu_cache);
2373 
2374 	/* NUMA: free the node structures */
2375 	for_each_kmem_cache_node(cachep, i, n) {
2376 		kfree(n->shared);
2377 		free_alien_cache(n->alien);
2378 		kfree(n);
2379 		cachep->node[i] = NULL;
2380 	}
2381 }
2382 
2383 /*
2384  * Get the memory for a slab management obj.
2385  *
2386  * For a slab cache when the slab descriptor is off-slab, the
2387  * slab descriptor can't come from the same cache which is being created,
2388  * Because if it is the case, that means we defer the creation of
2389  * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2390  * And we eventually call down to __kmem_cache_create(), which
2391  * in turn looks up in the kmalloc_{dma,}_caches for the disired-size one.
2392  * This is a "chicken-and-egg" problem.
2393  *
2394  * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2395  * which are all initialized during kmem_cache_init().
2396  */
2397 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2398 				   struct page *page, int colour_off,
2399 				   gfp_t local_flags, int nodeid)
2400 {
2401 	void *freelist;
2402 	void *addr = page_address(page);
2403 
2404 	page->s_mem = addr + colour_off;
2405 	page->active = 0;
2406 
2407 	if (OBJFREELIST_SLAB(cachep))
2408 		freelist = NULL;
2409 	else if (OFF_SLAB(cachep)) {
2410 		/* Slab management obj is off-slab. */
2411 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2412 					      local_flags, nodeid);
2413 		if (!freelist)
2414 			return NULL;
2415 	} else {
2416 		/* We will use last bytes at the slab for freelist */
2417 		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2418 				cachep->freelist_size;
2419 	}
2420 
2421 	return freelist;
2422 }
2423 
2424 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2425 {
2426 	return ((freelist_idx_t *)page->freelist)[idx];
2427 }
2428 
2429 static inline void set_free_obj(struct page *page,
2430 					unsigned int idx, freelist_idx_t val)
2431 {
2432 	((freelist_idx_t *)(page->freelist))[idx] = val;
2433 }
2434 
2435 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2436 {
2437 #if DEBUG
2438 	int i;
2439 
2440 	for (i = 0; i < cachep->num; i++) {
2441 		void *objp = index_to_obj(cachep, page, i);
2442 
2443 		if (cachep->flags & SLAB_STORE_USER)
2444 			*dbg_userword(cachep, objp) = NULL;
2445 
2446 		if (cachep->flags & SLAB_RED_ZONE) {
2447 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2448 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2449 		}
2450 		/*
2451 		 * Constructors are not allowed to allocate memory from the same
2452 		 * cache which they are a constructor for.  Otherwise, deadlock.
2453 		 * They must also be threaded.
2454 		 */
2455 		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2456 			kasan_unpoison_object_data(cachep,
2457 						   objp + obj_offset(cachep));
2458 			cachep->ctor(objp + obj_offset(cachep));
2459 			kasan_poison_object_data(
2460 				cachep, objp + obj_offset(cachep));
2461 		}
2462 
2463 		if (cachep->flags & SLAB_RED_ZONE) {
2464 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2465 				slab_error(cachep, "constructor overwrote the end of an object");
2466 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2467 				slab_error(cachep, "constructor overwrote the start of an object");
2468 		}
2469 		/* need to poison the objs? */
2470 		if (cachep->flags & SLAB_POISON) {
2471 			poison_obj(cachep, objp, POISON_FREE);
2472 			slab_kernel_map(cachep, objp, 0, 0);
2473 		}
2474 	}
2475 #endif
2476 }
2477 
2478 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2479 /* Hold information during a freelist initialization */
2480 union freelist_init_state {
2481 	struct {
2482 		unsigned int pos;
2483 		unsigned int *list;
2484 		unsigned int count;
2485 		unsigned int rand;
2486 	};
2487 	struct rnd_state rnd_state;
2488 };
2489 
2490 /*
2491  * Initialize the state based on the randomization methode available.
2492  * return true if the pre-computed list is available, false otherwize.
2493  */
2494 static bool freelist_state_initialize(union freelist_init_state *state,
2495 				struct kmem_cache *cachep,
2496 				unsigned int count)
2497 {
2498 	bool ret;
2499 	unsigned int rand;
2500 
2501 	/* Use best entropy available to define a random shift */
2502 	rand = get_random_int();
2503 
2504 	/* Use a random state if the pre-computed list is not available */
2505 	if (!cachep->random_seq) {
2506 		prandom_seed_state(&state->rnd_state, rand);
2507 		ret = false;
2508 	} else {
2509 		state->list = cachep->random_seq;
2510 		state->count = count;
2511 		state->pos = 0;
2512 		state->rand = rand;
2513 		ret = true;
2514 	}
2515 	return ret;
2516 }
2517 
2518 /* Get the next entry on the list and randomize it using a random shift */
2519 static freelist_idx_t next_random_slot(union freelist_init_state *state)
2520 {
2521 	return (state->list[state->pos++] + state->rand) % state->count;
2522 }
2523 
2524 /* Swap two freelist entries */
2525 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2526 {
2527 	swap(((freelist_idx_t *)page->freelist)[a],
2528 		((freelist_idx_t *)page->freelist)[b]);
2529 }
2530 
2531 /*
2532  * Shuffle the freelist initialization state based on pre-computed lists.
2533  * return true if the list was successfully shuffled, false otherwise.
2534  */
2535 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2536 {
2537 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2538 	union freelist_init_state state;
2539 	bool precomputed;
2540 
2541 	if (count < 2)
2542 		return false;
2543 
2544 	precomputed = freelist_state_initialize(&state, cachep, count);
2545 
2546 	/* Take a random entry as the objfreelist */
2547 	if (OBJFREELIST_SLAB(cachep)) {
2548 		if (!precomputed)
2549 			objfreelist = count - 1;
2550 		else
2551 			objfreelist = next_random_slot(&state);
2552 		page->freelist = index_to_obj(cachep, page, objfreelist) +
2553 						obj_offset(cachep);
2554 		count--;
2555 	}
2556 
2557 	/*
2558 	 * On early boot, generate the list dynamically.
2559 	 * Later use a pre-computed list for speed.
2560 	 */
2561 	if (!precomputed) {
2562 		for (i = 0; i < count; i++)
2563 			set_free_obj(page, i, i);
2564 
2565 		/* Fisher-Yates shuffle */
2566 		for (i = count - 1; i > 0; i--) {
2567 			rand = prandom_u32_state(&state.rnd_state);
2568 			rand %= (i + 1);
2569 			swap_free_obj(page, i, rand);
2570 		}
2571 	} else {
2572 		for (i = 0; i < count; i++)
2573 			set_free_obj(page, i, next_random_slot(&state));
2574 	}
2575 
2576 	if (OBJFREELIST_SLAB(cachep))
2577 		set_free_obj(page, cachep->num - 1, objfreelist);
2578 
2579 	return true;
2580 }
2581 #else
2582 static inline bool shuffle_freelist(struct kmem_cache *cachep,
2583 				struct page *page)
2584 {
2585 	return false;
2586 }
2587 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
2588 
2589 static void cache_init_objs(struct kmem_cache *cachep,
2590 			    struct page *page)
2591 {
2592 	int i;
2593 	void *objp;
2594 	bool shuffled;
2595 
2596 	cache_init_objs_debug(cachep, page);
2597 
2598 	/* Try to randomize the freelist if enabled */
2599 	shuffled = shuffle_freelist(cachep, page);
2600 
2601 	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2602 		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2603 						obj_offset(cachep);
2604 	}
2605 
2606 	for (i = 0; i < cachep->num; i++) {
2607 		objp = index_to_obj(cachep, page, i);
2608 		kasan_init_slab_obj(cachep, objp);
2609 
2610 		/* constructor could break poison info */
2611 		if (DEBUG == 0 && cachep->ctor) {
2612 			kasan_unpoison_object_data(cachep, objp);
2613 			cachep->ctor(objp);
2614 			kasan_poison_object_data(cachep, objp);
2615 		}
2616 
2617 		if (!shuffled)
2618 			set_free_obj(page, i, i);
2619 	}
2620 }
2621 
2622 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2623 {
2624 	void *objp;
2625 
2626 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2627 	page->active++;
2628 
2629 #if DEBUG
2630 	if (cachep->flags & SLAB_STORE_USER)
2631 		set_store_user_dirty(cachep);
2632 #endif
2633 
2634 	return objp;
2635 }
2636 
2637 static void slab_put_obj(struct kmem_cache *cachep,
2638 			struct page *page, void *objp)
2639 {
2640 	unsigned int objnr = obj_to_index(cachep, page, objp);
2641 #if DEBUG
2642 	unsigned int i;
2643 
2644 	/* Verify double free bug */
2645 	for (i = page->active; i < cachep->num; i++) {
2646 		if (get_free_obj(page, i) == objnr) {
2647 			pr_err("slab: double free detected in cache '%s', objp %p\n",
2648 			       cachep->name, objp);
2649 			BUG();
2650 		}
2651 	}
2652 #endif
2653 	page->active--;
2654 	if (!page->freelist)
2655 		page->freelist = objp + obj_offset(cachep);
2656 
2657 	set_free_obj(page, page->active, objnr);
2658 }
2659 
2660 /*
2661  * Map pages beginning at addr to the given cache and slab. This is required
2662  * for the slab allocator to be able to lookup the cache and slab of a
2663  * virtual address for kfree, ksize, and slab debugging.
2664  */
2665 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2666 			   void *freelist)
2667 {
2668 	page->slab_cache = cache;
2669 	page->freelist = freelist;
2670 }
2671 
2672 /*
2673  * Grow (by 1) the number of slabs within a cache.  This is called by
2674  * kmem_cache_alloc() when there are no active objs left in a cache.
2675  */
2676 static struct page *cache_grow_begin(struct kmem_cache *cachep,
2677 				gfp_t flags, int nodeid)
2678 {
2679 	void *freelist;
2680 	size_t offset;
2681 	gfp_t local_flags;
2682 	int page_node;
2683 	struct kmem_cache_node *n;
2684 	struct page *page;
2685 
2686 	/*
2687 	 * Be lazy and only check for valid flags here,  keeping it out of the
2688 	 * critical path in kmem_cache_alloc().
2689 	 */
2690 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2691 		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2692 		flags &= ~GFP_SLAB_BUG_MASK;
2693 		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2694 				invalid_mask, &invalid_mask, flags, &flags);
2695 		dump_stack();
2696 	}
2697 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2698 
2699 	check_irq_off();
2700 	if (gfpflags_allow_blocking(local_flags))
2701 		local_irq_enable();
2702 
2703 	/*
2704 	 * Get mem for the objs.  Attempt to allocate a physical page from
2705 	 * 'nodeid'.
2706 	 */
2707 	page = kmem_getpages(cachep, local_flags, nodeid);
2708 	if (!page)
2709 		goto failed;
2710 
2711 	page_node = page_to_nid(page);
2712 	n = get_node(cachep, page_node);
2713 
2714 	/* Get colour for the slab, and cal the next value. */
2715 	n->colour_next++;
2716 	if (n->colour_next >= cachep->colour)
2717 		n->colour_next = 0;
2718 
2719 	offset = n->colour_next;
2720 	if (offset >= cachep->colour)
2721 		offset = 0;
2722 
2723 	offset *= cachep->colour_off;
2724 
2725 	/* Get slab management. */
2726 	freelist = alloc_slabmgmt(cachep, page, offset,
2727 			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2728 	if (OFF_SLAB(cachep) && !freelist)
2729 		goto opps1;
2730 
2731 	slab_map_pages(cachep, page, freelist);
2732 
2733 	kasan_poison_slab(page);
2734 	cache_init_objs(cachep, page);
2735 
2736 	if (gfpflags_allow_blocking(local_flags))
2737 		local_irq_disable();
2738 
2739 	return page;
2740 
2741 opps1:
2742 	kmem_freepages(cachep, page);
2743 failed:
2744 	if (gfpflags_allow_blocking(local_flags))
2745 		local_irq_disable();
2746 	return NULL;
2747 }
2748 
2749 static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2750 {
2751 	struct kmem_cache_node *n;
2752 	void *list = NULL;
2753 
2754 	check_irq_off();
2755 
2756 	if (!page)
2757 		return;
2758 
2759 	INIT_LIST_HEAD(&page->lru);
2760 	n = get_node(cachep, page_to_nid(page));
2761 
2762 	spin_lock(&n->list_lock);
2763 	if (!page->active)
2764 		list_add_tail(&page->lru, &(n->slabs_free));
2765 	else
2766 		fixup_slab_list(cachep, n, page, &list);
2767 	STATS_INC_GROWN(cachep);
2768 	n->free_objects += cachep->num - page->active;
2769 	spin_unlock(&n->list_lock);
2770 
2771 	fixup_objfreelist_debug(cachep, &list);
2772 }
2773 
2774 #if DEBUG
2775 
2776 /*
2777  * Perform extra freeing checks:
2778  * - detect bad pointers.
2779  * - POISON/RED_ZONE checking
2780  */
2781 static void kfree_debugcheck(const void *objp)
2782 {
2783 	if (!virt_addr_valid(objp)) {
2784 		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2785 		       (unsigned long)objp);
2786 		BUG();
2787 	}
2788 }
2789 
2790 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2791 {
2792 	unsigned long long redzone1, redzone2;
2793 
2794 	redzone1 = *dbg_redzone1(cache, obj);
2795 	redzone2 = *dbg_redzone2(cache, obj);
2796 
2797 	/*
2798 	 * Redzone is ok.
2799 	 */
2800 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2801 		return;
2802 
2803 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2804 		slab_error(cache, "double free detected");
2805 	else
2806 		slab_error(cache, "memory outside object was overwritten");
2807 
2808 	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2809 	       obj, redzone1, redzone2);
2810 }
2811 
2812 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2813 				   unsigned long caller)
2814 {
2815 	unsigned int objnr;
2816 	struct page *page;
2817 
2818 	BUG_ON(virt_to_cache(objp) != cachep);
2819 
2820 	objp -= obj_offset(cachep);
2821 	kfree_debugcheck(objp);
2822 	page = virt_to_head_page(objp);
2823 
2824 	if (cachep->flags & SLAB_RED_ZONE) {
2825 		verify_redzone_free(cachep, objp);
2826 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2827 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2828 	}
2829 	if (cachep->flags & SLAB_STORE_USER) {
2830 		set_store_user_dirty(cachep);
2831 		*dbg_userword(cachep, objp) = (void *)caller;
2832 	}
2833 
2834 	objnr = obj_to_index(cachep, page, objp);
2835 
2836 	BUG_ON(objnr >= cachep->num);
2837 	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2838 
2839 	if (cachep->flags & SLAB_POISON) {
2840 		poison_obj(cachep, objp, POISON_FREE);
2841 		slab_kernel_map(cachep, objp, 0, caller);
2842 	}
2843 	return objp;
2844 }
2845 
2846 #else
2847 #define kfree_debugcheck(x) do { } while(0)
2848 #define cache_free_debugcheck(x,objp,z) (objp)
2849 #endif
2850 
2851 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2852 						void **list)
2853 {
2854 #if DEBUG
2855 	void *next = *list;
2856 	void *objp;
2857 
2858 	while (next) {
2859 		objp = next - obj_offset(cachep);
2860 		next = *(void **)next;
2861 		poison_obj(cachep, objp, POISON_FREE);
2862 	}
2863 #endif
2864 }
2865 
2866 static inline void fixup_slab_list(struct kmem_cache *cachep,
2867 				struct kmem_cache_node *n, struct page *page,
2868 				void **list)
2869 {
2870 	/* move slabp to correct slabp list: */
2871 	list_del(&page->lru);
2872 	if (page->active == cachep->num) {
2873 		list_add(&page->lru, &n->slabs_full);
2874 		if (OBJFREELIST_SLAB(cachep)) {
2875 #if DEBUG
2876 			/* Poisoning will be done without holding the lock */
2877 			if (cachep->flags & SLAB_POISON) {
2878 				void **objp = page->freelist;
2879 
2880 				*objp = *list;
2881 				*list = objp;
2882 			}
2883 #endif
2884 			page->freelist = NULL;
2885 		}
2886 	} else
2887 		list_add(&page->lru, &n->slabs_partial);
2888 }
2889 
2890 /* Try to find non-pfmemalloc slab if needed */
2891 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2892 					struct page *page, bool pfmemalloc)
2893 {
2894 	if (!page)
2895 		return NULL;
2896 
2897 	if (pfmemalloc)
2898 		return page;
2899 
2900 	if (!PageSlabPfmemalloc(page))
2901 		return page;
2902 
2903 	/* No need to keep pfmemalloc slab if we have enough free objects */
2904 	if (n->free_objects > n->free_limit) {
2905 		ClearPageSlabPfmemalloc(page);
2906 		return page;
2907 	}
2908 
2909 	/* Move pfmemalloc slab to the end of list to speed up next search */
2910 	list_del(&page->lru);
2911 	if (!page->active)
2912 		list_add_tail(&page->lru, &n->slabs_free);
2913 	else
2914 		list_add_tail(&page->lru, &n->slabs_partial);
2915 
2916 	list_for_each_entry(page, &n->slabs_partial, lru) {
2917 		if (!PageSlabPfmemalloc(page))
2918 			return page;
2919 	}
2920 
2921 	list_for_each_entry(page, &n->slabs_free, lru) {
2922 		if (!PageSlabPfmemalloc(page))
2923 			return page;
2924 	}
2925 
2926 	return NULL;
2927 }
2928 
2929 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2930 {
2931 	struct page *page;
2932 
2933 	page = list_first_entry_or_null(&n->slabs_partial,
2934 			struct page, lru);
2935 	if (!page) {
2936 		n->free_touched = 1;
2937 		page = list_first_entry_or_null(&n->slabs_free,
2938 				struct page, lru);
2939 	}
2940 
2941 	if (sk_memalloc_socks())
2942 		return get_valid_first_slab(n, page, pfmemalloc);
2943 
2944 	return page;
2945 }
2946 
2947 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2948 				struct kmem_cache_node *n, gfp_t flags)
2949 {
2950 	struct page *page;
2951 	void *obj;
2952 	void *list = NULL;
2953 
2954 	if (!gfp_pfmemalloc_allowed(flags))
2955 		return NULL;
2956 
2957 	spin_lock(&n->list_lock);
2958 	page = get_first_slab(n, true);
2959 	if (!page) {
2960 		spin_unlock(&n->list_lock);
2961 		return NULL;
2962 	}
2963 
2964 	obj = slab_get_obj(cachep, page);
2965 	n->free_objects--;
2966 
2967 	fixup_slab_list(cachep, n, page, &list);
2968 
2969 	spin_unlock(&n->list_lock);
2970 	fixup_objfreelist_debug(cachep, &list);
2971 
2972 	return obj;
2973 }
2974 
2975 /*
2976  * Slab list should be fixed up by fixup_slab_list() for existing slab
2977  * or cache_grow_end() for new slab
2978  */
2979 static __always_inline int alloc_block(struct kmem_cache *cachep,
2980 		struct array_cache *ac, struct page *page, int batchcount)
2981 {
2982 	/*
2983 	 * There must be at least one object available for
2984 	 * allocation.
2985 	 */
2986 	BUG_ON(page->active >= cachep->num);
2987 
2988 	while (page->active < cachep->num && batchcount--) {
2989 		STATS_INC_ALLOCED(cachep);
2990 		STATS_INC_ACTIVE(cachep);
2991 		STATS_SET_HIGH(cachep);
2992 
2993 		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2994 	}
2995 
2996 	return batchcount;
2997 }
2998 
2999 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3000 {
3001 	int batchcount;
3002 	struct kmem_cache_node *n;
3003 	struct array_cache *ac, *shared;
3004 	int node;
3005 	void *list = NULL;
3006 	struct page *page;
3007 
3008 	check_irq_off();
3009 	node = numa_mem_id();
3010 
3011 	ac = cpu_cache_get(cachep);
3012 	batchcount = ac->batchcount;
3013 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3014 		/*
3015 		 * If there was little recent activity on this cache, then
3016 		 * perform only a partial refill.  Otherwise we could generate
3017 		 * refill bouncing.
3018 		 */
3019 		batchcount = BATCHREFILL_LIMIT;
3020 	}
3021 	n = get_node(cachep, node);
3022 
3023 	BUG_ON(ac->avail > 0 || !n);
3024 	shared = READ_ONCE(n->shared);
3025 	if (!n->free_objects && (!shared || !shared->avail))
3026 		goto direct_grow;
3027 
3028 	spin_lock(&n->list_lock);
3029 	shared = READ_ONCE(n->shared);
3030 
3031 	/* See if we can refill from the shared array */
3032 	if (shared && transfer_objects(ac, shared, batchcount)) {
3033 		shared->touched = 1;
3034 		goto alloc_done;
3035 	}
3036 
3037 	while (batchcount > 0) {
3038 		/* Get slab alloc is to come from. */
3039 		page = get_first_slab(n, false);
3040 		if (!page)
3041 			goto must_grow;
3042 
3043 		check_spinlock_acquired(cachep);
3044 
3045 		batchcount = alloc_block(cachep, ac, page, batchcount);
3046 		fixup_slab_list(cachep, n, page, &list);
3047 	}
3048 
3049 must_grow:
3050 	n->free_objects -= ac->avail;
3051 alloc_done:
3052 	spin_unlock(&n->list_lock);
3053 	fixup_objfreelist_debug(cachep, &list);
3054 
3055 direct_grow:
3056 	if (unlikely(!ac->avail)) {
3057 		/* Check if we can use obj in pfmemalloc slab */
3058 		if (sk_memalloc_socks()) {
3059 			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3060 
3061 			if (obj)
3062 				return obj;
3063 		}
3064 
3065 		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3066 
3067 		/*
3068 		 * cache_grow_begin() can reenable interrupts,
3069 		 * then ac could change.
3070 		 */
3071 		ac = cpu_cache_get(cachep);
3072 		if (!ac->avail && page)
3073 			alloc_block(cachep, ac, page, batchcount);
3074 		cache_grow_end(cachep, page);
3075 
3076 		if (!ac->avail)
3077 			return NULL;
3078 	}
3079 	ac->touched = 1;
3080 
3081 	return ac->entry[--ac->avail];
3082 }
3083 
3084 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3085 						gfp_t flags)
3086 {
3087 	might_sleep_if(gfpflags_allow_blocking(flags));
3088 }
3089 
3090 #if DEBUG
3091 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3092 				gfp_t flags, void *objp, unsigned long caller)
3093 {
3094 	if (!objp)
3095 		return objp;
3096 	if (cachep->flags & SLAB_POISON) {
3097 		check_poison_obj(cachep, objp);
3098 		slab_kernel_map(cachep, objp, 1, 0);
3099 		poison_obj(cachep, objp, POISON_INUSE);
3100 	}
3101 	if (cachep->flags & SLAB_STORE_USER)
3102 		*dbg_userword(cachep, objp) = (void *)caller;
3103 
3104 	if (cachep->flags & SLAB_RED_ZONE) {
3105 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3106 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3107 			slab_error(cachep, "double free, or memory outside object was overwritten");
3108 			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3109 			       objp, *dbg_redzone1(cachep, objp),
3110 			       *dbg_redzone2(cachep, objp));
3111 		}
3112 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3113 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3114 	}
3115 
3116 	objp += obj_offset(cachep);
3117 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3118 		cachep->ctor(objp);
3119 	if (ARCH_SLAB_MINALIGN &&
3120 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3121 		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3122 		       objp, (int)ARCH_SLAB_MINALIGN);
3123 	}
3124 	return objp;
3125 }
3126 #else
3127 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3128 #endif
3129 
3130 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3131 {
3132 	void *objp;
3133 	struct array_cache *ac;
3134 
3135 	check_irq_off();
3136 
3137 	ac = cpu_cache_get(cachep);
3138 	if (likely(ac->avail)) {
3139 		ac->touched = 1;
3140 		objp = ac->entry[--ac->avail];
3141 
3142 		STATS_INC_ALLOCHIT(cachep);
3143 		goto out;
3144 	}
3145 
3146 	STATS_INC_ALLOCMISS(cachep);
3147 	objp = cache_alloc_refill(cachep, flags);
3148 	/*
3149 	 * the 'ac' may be updated by cache_alloc_refill(),
3150 	 * and kmemleak_erase() requires its correct value.
3151 	 */
3152 	ac = cpu_cache_get(cachep);
3153 
3154 out:
3155 	/*
3156 	 * To avoid a false negative, if an object that is in one of the
3157 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3158 	 * treat the array pointers as a reference to the object.
3159 	 */
3160 	if (objp)
3161 		kmemleak_erase(&ac->entry[ac->avail]);
3162 	return objp;
3163 }
3164 
3165 #ifdef CONFIG_NUMA
3166 /*
3167  * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set.
3168  *
3169  * If we are in_interrupt, then process context, including cpusets and
3170  * mempolicy, may not apply and should not be used for allocation policy.
3171  */
3172 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3173 {
3174 	int nid_alloc, nid_here;
3175 
3176 	if (in_interrupt() || (flags & __GFP_THISNODE))
3177 		return NULL;
3178 	nid_alloc = nid_here = numa_mem_id();
3179 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3180 		nid_alloc = cpuset_slab_spread_node();
3181 	else if (current->mempolicy)
3182 		nid_alloc = mempolicy_slab_node();
3183 	if (nid_alloc != nid_here)
3184 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3185 	return NULL;
3186 }
3187 
3188 /*
3189  * Fallback function if there was no memory available and no objects on a
3190  * certain node and fall back is permitted. First we scan all the
3191  * available node for available objects. If that fails then we
3192  * perform an allocation without specifying a node. This allows the page
3193  * allocator to do its reclaim / fallback magic. We then insert the
3194  * slab into the proper nodelist and then allocate from it.
3195  */
3196 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3197 {
3198 	struct zonelist *zonelist;
3199 	struct zoneref *z;
3200 	struct zone *zone;
3201 	enum zone_type high_zoneidx = gfp_zone(flags);
3202 	void *obj = NULL;
3203 	struct page *page;
3204 	int nid;
3205 	unsigned int cpuset_mems_cookie;
3206 
3207 	if (flags & __GFP_THISNODE)
3208 		return NULL;
3209 
3210 retry_cpuset:
3211 	cpuset_mems_cookie = read_mems_allowed_begin();
3212 	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3213 
3214 retry:
3215 	/*
3216 	 * Look through allowed nodes for objects available
3217 	 * from existing per node queues.
3218 	 */
3219 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3220 		nid = zone_to_nid(zone);
3221 
3222 		if (cpuset_zone_allowed(zone, flags) &&
3223 			get_node(cache, nid) &&
3224 			get_node(cache, nid)->free_objects) {
3225 				obj = ____cache_alloc_node(cache,
3226 					gfp_exact_node(flags), nid);
3227 				if (obj)
3228 					break;
3229 		}
3230 	}
3231 
3232 	if (!obj) {
3233 		/*
3234 		 * This allocation will be performed within the constraints
3235 		 * of the current cpuset / memory policy requirements.
3236 		 * We may trigger various forms of reclaim on the allowed
3237 		 * set and go into memory reserves if necessary.
3238 		 */
3239 		page = cache_grow_begin(cache, flags, numa_mem_id());
3240 		cache_grow_end(cache, page);
3241 		if (page) {
3242 			nid = page_to_nid(page);
3243 			obj = ____cache_alloc_node(cache,
3244 				gfp_exact_node(flags), nid);
3245 
3246 			/*
3247 			 * Another processor may allocate the objects in
3248 			 * the slab since we are not holding any locks.
3249 			 */
3250 			if (!obj)
3251 				goto retry;
3252 		}
3253 	}
3254 
3255 	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3256 		goto retry_cpuset;
3257 	return obj;
3258 }
3259 
3260 /*
3261  * A interface to enable slab creation on nodeid
3262  */
3263 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3264 				int nodeid)
3265 {
3266 	struct page *page;
3267 	struct kmem_cache_node *n;
3268 	void *obj = NULL;
3269 	void *list = NULL;
3270 
3271 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3272 	n = get_node(cachep, nodeid);
3273 	BUG_ON(!n);
3274 
3275 	check_irq_off();
3276 	spin_lock(&n->list_lock);
3277 	page = get_first_slab(n, false);
3278 	if (!page)
3279 		goto must_grow;
3280 
3281 	check_spinlock_acquired_node(cachep, nodeid);
3282 
3283 	STATS_INC_NODEALLOCS(cachep);
3284 	STATS_INC_ACTIVE(cachep);
3285 	STATS_SET_HIGH(cachep);
3286 
3287 	BUG_ON(page->active == cachep->num);
3288 
3289 	obj = slab_get_obj(cachep, page);
3290 	n->free_objects--;
3291 
3292 	fixup_slab_list(cachep, n, page, &list);
3293 
3294 	spin_unlock(&n->list_lock);
3295 	fixup_objfreelist_debug(cachep, &list);
3296 	return obj;
3297 
3298 must_grow:
3299 	spin_unlock(&n->list_lock);
3300 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3301 	if (page) {
3302 		/* This slab isn't counted yet so don't update free_objects */
3303 		obj = slab_get_obj(cachep, page);
3304 	}
3305 	cache_grow_end(cachep, page);
3306 
3307 	return obj ? obj : fallback_alloc(cachep, flags);
3308 }
3309 
3310 static __always_inline void *
3311 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3312 		   unsigned long caller)
3313 {
3314 	unsigned long save_flags;
3315 	void *ptr;
3316 	int slab_node = numa_mem_id();
3317 
3318 	flags &= gfp_allowed_mask;
3319 	cachep = slab_pre_alloc_hook(cachep, flags);
3320 	if (unlikely(!cachep))
3321 		return NULL;
3322 
3323 	cache_alloc_debugcheck_before(cachep, flags);
3324 	local_irq_save(save_flags);
3325 
3326 	if (nodeid == NUMA_NO_NODE)
3327 		nodeid = slab_node;
3328 
3329 	if (unlikely(!get_node(cachep, nodeid))) {
3330 		/* Node not bootstrapped yet */
3331 		ptr = fallback_alloc(cachep, flags);
3332 		goto out;
3333 	}
3334 
3335 	if (nodeid == slab_node) {
3336 		/*
3337 		 * Use the locally cached objects if possible.
3338 		 * However ____cache_alloc does not allow fallback
3339 		 * to other nodes. It may fail while we still have
3340 		 * objects on other nodes available.
3341 		 */
3342 		ptr = ____cache_alloc(cachep, flags);
3343 		if (ptr)
3344 			goto out;
3345 	}
3346 	/* ___cache_alloc_node can fall back to other nodes */
3347 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3348   out:
3349 	local_irq_restore(save_flags);
3350 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3351 
3352 	if (unlikely(flags & __GFP_ZERO) && ptr)
3353 		memset(ptr, 0, cachep->object_size);
3354 
3355 	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3356 	return ptr;
3357 }
3358 
3359 static __always_inline void *
3360 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3361 {
3362 	void *objp;
3363 
3364 	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3365 		objp = alternate_node_alloc(cache, flags);
3366 		if (objp)
3367 			goto out;
3368 	}
3369 	objp = ____cache_alloc(cache, flags);
3370 
3371 	/*
3372 	 * We may just have run out of memory on the local node.
3373 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3374 	 */
3375 	if (!objp)
3376 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3377 
3378   out:
3379 	return objp;
3380 }
3381 #else
3382 
3383 static __always_inline void *
3384 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3385 {
3386 	return ____cache_alloc(cachep, flags);
3387 }
3388 
3389 #endif /* CONFIG_NUMA */
3390 
3391 static __always_inline void *
3392 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3393 {
3394 	unsigned long save_flags;
3395 	void *objp;
3396 
3397 	flags &= gfp_allowed_mask;
3398 	cachep = slab_pre_alloc_hook(cachep, flags);
3399 	if (unlikely(!cachep))
3400 		return NULL;
3401 
3402 	cache_alloc_debugcheck_before(cachep, flags);
3403 	local_irq_save(save_flags);
3404 	objp = __do_cache_alloc(cachep, flags);
3405 	local_irq_restore(save_flags);
3406 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3407 	prefetchw(objp);
3408 
3409 	if (unlikely(flags & __GFP_ZERO) && objp)
3410 		memset(objp, 0, cachep->object_size);
3411 
3412 	slab_post_alloc_hook(cachep, flags, 1, &objp);
3413 	return objp;
3414 }
3415 
3416 /*
3417  * Caller needs to acquire correct kmem_cache_node's list_lock
3418  * @list: List of detached free slabs should be freed by caller
3419  */
3420 static void free_block(struct kmem_cache *cachep, void **objpp,
3421 			int nr_objects, int node, struct list_head *list)
3422 {
3423 	int i;
3424 	struct kmem_cache_node *n = get_node(cachep, node);
3425 	struct page *page;
3426 
3427 	n->free_objects += nr_objects;
3428 
3429 	for (i = 0; i < nr_objects; i++) {
3430 		void *objp;
3431 		struct page *page;
3432 
3433 		objp = objpp[i];
3434 
3435 		page = virt_to_head_page(objp);
3436 		list_del(&page->lru);
3437 		check_spinlock_acquired_node(cachep, node);
3438 		slab_put_obj(cachep, page, objp);
3439 		STATS_DEC_ACTIVE(cachep);
3440 
3441 		/* fixup slab chains */
3442 		if (page->active == 0)
3443 			list_add(&page->lru, &n->slabs_free);
3444 		else {
3445 			/* Unconditionally move a slab to the end of the
3446 			 * partial list on free - maximum time for the
3447 			 * other objects to be freed, too.
3448 			 */
3449 			list_add_tail(&page->lru, &n->slabs_partial);
3450 		}
3451 	}
3452 
3453 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3454 		n->free_objects -= cachep->num;
3455 
3456 		page = list_last_entry(&n->slabs_free, struct page, lru);
3457 		list_move(&page->lru, list);
3458 	}
3459 }
3460 
3461 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3462 {
3463 	int batchcount;
3464 	struct kmem_cache_node *n;
3465 	int node = numa_mem_id();
3466 	LIST_HEAD(list);
3467 
3468 	batchcount = ac->batchcount;
3469 
3470 	check_irq_off();
3471 	n = get_node(cachep, node);
3472 	spin_lock(&n->list_lock);
3473 	if (n->shared) {
3474 		struct array_cache *shared_array = n->shared;
3475 		int max = shared_array->limit - shared_array->avail;
3476 		if (max) {
3477 			if (batchcount > max)
3478 				batchcount = max;
3479 			memcpy(&(shared_array->entry[shared_array->avail]),
3480 			       ac->entry, sizeof(void *) * batchcount);
3481 			shared_array->avail += batchcount;
3482 			goto free_done;
3483 		}
3484 	}
3485 
3486 	free_block(cachep, ac->entry, batchcount, node, &list);
3487 free_done:
3488 #if STATS
3489 	{
3490 		int i = 0;
3491 		struct page *page;
3492 
3493 		list_for_each_entry(page, &n->slabs_free, lru) {
3494 			BUG_ON(page->active);
3495 
3496 			i++;
3497 		}
3498 		STATS_SET_FREEABLE(cachep, i);
3499 	}
3500 #endif
3501 	spin_unlock(&n->list_lock);
3502 	slabs_destroy(cachep, &list);
3503 	ac->avail -= batchcount;
3504 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3505 }
3506 
3507 /*
3508  * Release an obj back to its cache. If the obj has a constructed state, it must
3509  * be in this state _before_ it is released.  Called with disabled ints.
3510  */
3511 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3512 				unsigned long caller)
3513 {
3514 	/* Put the object into the quarantine, don't touch it for now. */
3515 	if (kasan_slab_free(cachep, objp))
3516 		return;
3517 
3518 	___cache_free(cachep, objp, caller);
3519 }
3520 
3521 void ___cache_free(struct kmem_cache *cachep, void *objp,
3522 		unsigned long caller)
3523 {
3524 	struct array_cache *ac = cpu_cache_get(cachep);
3525 
3526 	check_irq_off();
3527 	kmemleak_free_recursive(objp, cachep->flags);
3528 	objp = cache_free_debugcheck(cachep, objp, caller);
3529 
3530 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3531 
3532 	/*
3533 	 * Skip calling cache_free_alien() when the platform is not numa.
3534 	 * This will avoid cache misses that happen while accessing slabp (which
3535 	 * is per page memory  reference) to get nodeid. Instead use a global
3536 	 * variable to skip the call, which is mostly likely to be present in
3537 	 * the cache.
3538 	 */
3539 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3540 		return;
3541 
3542 	if (ac->avail < ac->limit) {
3543 		STATS_INC_FREEHIT(cachep);
3544 	} else {
3545 		STATS_INC_FREEMISS(cachep);
3546 		cache_flusharray(cachep, ac);
3547 	}
3548 
3549 	if (sk_memalloc_socks()) {
3550 		struct page *page = virt_to_head_page(objp);
3551 
3552 		if (unlikely(PageSlabPfmemalloc(page))) {
3553 			cache_free_pfmemalloc(cachep, page, objp);
3554 			return;
3555 		}
3556 	}
3557 
3558 	ac->entry[ac->avail++] = objp;
3559 }
3560 
3561 /**
3562  * kmem_cache_alloc - Allocate an object
3563  * @cachep: The cache to allocate from.
3564  * @flags: See kmalloc().
3565  *
3566  * Allocate an object from this cache.  The flags are only relevant
3567  * if the cache has no available objects.
3568  */
3569 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3570 {
3571 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3572 
3573 	kasan_slab_alloc(cachep, ret, flags);
3574 	trace_kmem_cache_alloc(_RET_IP_, ret,
3575 			       cachep->object_size, cachep->size, flags);
3576 
3577 	return ret;
3578 }
3579 EXPORT_SYMBOL(kmem_cache_alloc);
3580 
3581 static __always_inline void
3582 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3583 				  size_t size, void **p, unsigned long caller)
3584 {
3585 	size_t i;
3586 
3587 	for (i = 0; i < size; i++)
3588 		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3589 }
3590 
3591 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3592 			  void **p)
3593 {
3594 	size_t i;
3595 
3596 	s = slab_pre_alloc_hook(s, flags);
3597 	if (!s)
3598 		return 0;
3599 
3600 	cache_alloc_debugcheck_before(s, flags);
3601 
3602 	local_irq_disable();
3603 	for (i = 0; i < size; i++) {
3604 		void *objp = __do_cache_alloc(s, flags);
3605 
3606 		if (unlikely(!objp))
3607 			goto error;
3608 		p[i] = objp;
3609 	}
3610 	local_irq_enable();
3611 
3612 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3613 
3614 	/* Clear memory outside IRQ disabled section */
3615 	if (unlikely(flags & __GFP_ZERO))
3616 		for (i = 0; i < size; i++)
3617 			memset(p[i], 0, s->object_size);
3618 
3619 	slab_post_alloc_hook(s, flags, size, p);
3620 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3621 	return size;
3622 error:
3623 	local_irq_enable();
3624 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3625 	slab_post_alloc_hook(s, flags, i, p);
3626 	__kmem_cache_free_bulk(s, i, p);
3627 	return 0;
3628 }
3629 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3630 
3631 #ifdef CONFIG_TRACING
3632 void *
3633 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3634 {
3635 	void *ret;
3636 
3637 	ret = slab_alloc(cachep, flags, _RET_IP_);
3638 
3639 	kasan_kmalloc(cachep, ret, size, flags);
3640 	trace_kmalloc(_RET_IP_, ret,
3641 		      size, cachep->size, flags);
3642 	return ret;
3643 }
3644 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3645 #endif
3646 
3647 #ifdef CONFIG_NUMA
3648 /**
3649  * kmem_cache_alloc_node - Allocate an object on the specified node
3650  * @cachep: The cache to allocate from.
3651  * @flags: See kmalloc().
3652  * @nodeid: node number of the target node.
3653  *
3654  * Identical to kmem_cache_alloc but it will allocate memory on the given
3655  * node, which can improve the performance for cpu bound structures.
3656  *
3657  * Fallback to other node is possible if __GFP_THISNODE is not set.
3658  */
3659 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3660 {
3661 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3662 
3663 	kasan_slab_alloc(cachep, ret, flags);
3664 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3665 				    cachep->object_size, cachep->size,
3666 				    flags, nodeid);
3667 
3668 	return ret;
3669 }
3670 EXPORT_SYMBOL(kmem_cache_alloc_node);
3671 
3672 #ifdef CONFIG_TRACING
3673 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3674 				  gfp_t flags,
3675 				  int nodeid,
3676 				  size_t size)
3677 {
3678 	void *ret;
3679 
3680 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3681 
3682 	kasan_kmalloc(cachep, ret, size, flags);
3683 	trace_kmalloc_node(_RET_IP_, ret,
3684 			   size, cachep->size,
3685 			   flags, nodeid);
3686 	return ret;
3687 }
3688 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3689 #endif
3690 
3691 static __always_inline void *
3692 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3693 {
3694 	struct kmem_cache *cachep;
3695 	void *ret;
3696 
3697 	cachep = kmalloc_slab(size, flags);
3698 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3699 		return cachep;
3700 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3701 	kasan_kmalloc(cachep, ret, size, flags);
3702 
3703 	return ret;
3704 }
3705 
3706 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3707 {
3708 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3709 }
3710 EXPORT_SYMBOL(__kmalloc_node);
3711 
3712 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3713 		int node, unsigned long caller)
3714 {
3715 	return __do_kmalloc_node(size, flags, node, caller);
3716 }
3717 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3718 #endif /* CONFIG_NUMA */
3719 
3720 /**
3721  * __do_kmalloc - allocate memory
3722  * @size: how many bytes of memory are required.
3723  * @flags: the type of memory to allocate (see kmalloc).
3724  * @caller: function caller for debug tracking of the caller
3725  */
3726 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3727 					  unsigned long caller)
3728 {
3729 	struct kmem_cache *cachep;
3730 	void *ret;
3731 
3732 	cachep = kmalloc_slab(size, flags);
3733 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3734 		return cachep;
3735 	ret = slab_alloc(cachep, flags, caller);
3736 
3737 	kasan_kmalloc(cachep, ret, size, flags);
3738 	trace_kmalloc(caller, ret,
3739 		      size, cachep->size, flags);
3740 
3741 	return ret;
3742 }
3743 
3744 void *__kmalloc(size_t size, gfp_t flags)
3745 {
3746 	return __do_kmalloc(size, flags, _RET_IP_);
3747 }
3748 EXPORT_SYMBOL(__kmalloc);
3749 
3750 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3751 {
3752 	return __do_kmalloc(size, flags, caller);
3753 }
3754 EXPORT_SYMBOL(__kmalloc_track_caller);
3755 
3756 /**
3757  * kmem_cache_free - Deallocate an object
3758  * @cachep: The cache the allocation was from.
3759  * @objp: The previously allocated object.
3760  *
3761  * Free an object which was previously allocated from this
3762  * cache.
3763  */
3764 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3765 {
3766 	unsigned long flags;
3767 	cachep = cache_from_obj(cachep, objp);
3768 	if (!cachep)
3769 		return;
3770 
3771 	local_irq_save(flags);
3772 	debug_check_no_locks_freed(objp, cachep->object_size);
3773 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3774 		debug_check_no_obj_freed(objp, cachep->object_size);
3775 	__cache_free(cachep, objp, _RET_IP_);
3776 	local_irq_restore(flags);
3777 
3778 	trace_kmem_cache_free(_RET_IP_, objp);
3779 }
3780 EXPORT_SYMBOL(kmem_cache_free);
3781 
3782 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3783 {
3784 	struct kmem_cache *s;
3785 	size_t i;
3786 
3787 	local_irq_disable();
3788 	for (i = 0; i < size; i++) {
3789 		void *objp = p[i];
3790 
3791 		if (!orig_s) /* called via kfree_bulk */
3792 			s = virt_to_cache(objp);
3793 		else
3794 			s = cache_from_obj(orig_s, objp);
3795 
3796 		debug_check_no_locks_freed(objp, s->object_size);
3797 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3798 			debug_check_no_obj_freed(objp, s->object_size);
3799 
3800 		__cache_free(s, objp, _RET_IP_);
3801 	}
3802 	local_irq_enable();
3803 
3804 	/* FIXME: add tracing */
3805 }
3806 EXPORT_SYMBOL(kmem_cache_free_bulk);
3807 
3808 /**
3809  * kfree - free previously allocated memory
3810  * @objp: pointer returned by kmalloc.
3811  *
3812  * If @objp is NULL, no operation is performed.
3813  *
3814  * Don't free memory not originally allocated by kmalloc()
3815  * or you will run into trouble.
3816  */
3817 void kfree(const void *objp)
3818 {
3819 	struct kmem_cache *c;
3820 	unsigned long flags;
3821 
3822 	trace_kfree(_RET_IP_, objp);
3823 
3824 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3825 		return;
3826 	local_irq_save(flags);
3827 	kfree_debugcheck(objp);
3828 	c = virt_to_cache(objp);
3829 	debug_check_no_locks_freed(objp, c->object_size);
3830 
3831 	debug_check_no_obj_freed(objp, c->object_size);
3832 	__cache_free(c, (void *)objp, _RET_IP_);
3833 	local_irq_restore(flags);
3834 }
3835 EXPORT_SYMBOL(kfree);
3836 
3837 /*
3838  * This initializes kmem_cache_node or resizes various caches for all nodes.
3839  */
3840 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3841 {
3842 	int ret;
3843 	int node;
3844 	struct kmem_cache_node *n;
3845 
3846 	for_each_online_node(node) {
3847 		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3848 		if (ret)
3849 			goto fail;
3850 
3851 	}
3852 
3853 	return 0;
3854 
3855 fail:
3856 	if (!cachep->list.next) {
3857 		/* Cache is not active yet. Roll back what we did */
3858 		node--;
3859 		while (node >= 0) {
3860 			n = get_node(cachep, node);
3861 			if (n) {
3862 				kfree(n->shared);
3863 				free_alien_cache(n->alien);
3864 				kfree(n);
3865 				cachep->node[node] = NULL;
3866 			}
3867 			node--;
3868 		}
3869 	}
3870 	return -ENOMEM;
3871 }
3872 
3873 /* Always called with the slab_mutex held */
3874 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3875 				int batchcount, int shared, gfp_t gfp)
3876 {
3877 	struct array_cache __percpu *cpu_cache, *prev;
3878 	int cpu;
3879 
3880 	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3881 	if (!cpu_cache)
3882 		return -ENOMEM;
3883 
3884 	prev = cachep->cpu_cache;
3885 	cachep->cpu_cache = cpu_cache;
3886 	kick_all_cpus_sync();
3887 
3888 	check_irq_on();
3889 	cachep->batchcount = batchcount;
3890 	cachep->limit = limit;
3891 	cachep->shared = shared;
3892 
3893 	if (!prev)
3894 		goto setup_node;
3895 
3896 	for_each_online_cpu(cpu) {
3897 		LIST_HEAD(list);
3898 		int node;
3899 		struct kmem_cache_node *n;
3900 		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3901 
3902 		node = cpu_to_mem(cpu);
3903 		n = get_node(cachep, node);
3904 		spin_lock_irq(&n->list_lock);
3905 		free_block(cachep, ac->entry, ac->avail, node, &list);
3906 		spin_unlock_irq(&n->list_lock);
3907 		slabs_destroy(cachep, &list);
3908 	}
3909 	free_percpu(prev);
3910 
3911 setup_node:
3912 	return setup_kmem_cache_nodes(cachep, gfp);
3913 }
3914 
3915 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3916 				int batchcount, int shared, gfp_t gfp)
3917 {
3918 	int ret;
3919 	struct kmem_cache *c;
3920 
3921 	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3922 
3923 	if (slab_state < FULL)
3924 		return ret;
3925 
3926 	if ((ret < 0) || !is_root_cache(cachep))
3927 		return ret;
3928 
3929 	lockdep_assert_held(&slab_mutex);
3930 	for_each_memcg_cache(c, cachep) {
3931 		/* return value determined by the root cache only */
3932 		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3933 	}
3934 
3935 	return ret;
3936 }
3937 
3938 /* Called with slab_mutex held always */
3939 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3940 {
3941 	int err;
3942 	int limit = 0;
3943 	int shared = 0;
3944 	int batchcount = 0;
3945 
3946 	err = cache_random_seq_create(cachep, cachep->num, gfp);
3947 	if (err)
3948 		goto end;
3949 
3950 	if (!is_root_cache(cachep)) {
3951 		struct kmem_cache *root = memcg_root_cache(cachep);
3952 		limit = root->limit;
3953 		shared = root->shared;
3954 		batchcount = root->batchcount;
3955 	}
3956 
3957 	if (limit && shared && batchcount)
3958 		goto skip_setup;
3959 	/*
3960 	 * The head array serves three purposes:
3961 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3962 	 * - reduce the number of spinlock operations.
3963 	 * - reduce the number of linked list operations on the slab and
3964 	 *   bufctl chains: array operations are cheaper.
3965 	 * The numbers are guessed, we should auto-tune as described by
3966 	 * Bonwick.
3967 	 */
3968 	if (cachep->size > 131072)
3969 		limit = 1;
3970 	else if (cachep->size > PAGE_SIZE)
3971 		limit = 8;
3972 	else if (cachep->size > 1024)
3973 		limit = 24;
3974 	else if (cachep->size > 256)
3975 		limit = 54;
3976 	else
3977 		limit = 120;
3978 
3979 	/*
3980 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3981 	 * allocation behaviour: Most allocs on one cpu, most free operations
3982 	 * on another cpu. For these cases, an efficient object passing between
3983 	 * cpus is necessary. This is provided by a shared array. The array
3984 	 * replaces Bonwick's magazine layer.
3985 	 * On uniprocessor, it's functionally equivalent (but less efficient)
3986 	 * to a larger limit. Thus disabled by default.
3987 	 */
3988 	shared = 0;
3989 	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3990 		shared = 8;
3991 
3992 #if DEBUG
3993 	/*
3994 	 * With debugging enabled, large batchcount lead to excessively long
3995 	 * periods with disabled local interrupts. Limit the batchcount
3996 	 */
3997 	if (limit > 32)
3998 		limit = 32;
3999 #endif
4000 	batchcount = (limit + 1) / 2;
4001 skip_setup:
4002 	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4003 end:
4004 	if (err)
4005 		pr_err("enable_cpucache failed for %s, error %d\n",
4006 		       cachep->name, -err);
4007 	return err;
4008 }
4009 
4010 /*
4011  * Drain an array if it contains any elements taking the node lock only if
4012  * necessary. Note that the node listlock also protects the array_cache
4013  * if drain_array() is used on the shared array.
4014  */
4015 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4016 			 struct array_cache *ac, int node)
4017 {
4018 	LIST_HEAD(list);
4019 
4020 	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
4021 	check_mutex_acquired();
4022 
4023 	if (!ac || !ac->avail)
4024 		return;
4025 
4026 	if (ac->touched) {
4027 		ac->touched = 0;
4028 		return;
4029 	}
4030 
4031 	spin_lock_irq(&n->list_lock);
4032 	drain_array_locked(cachep, ac, node, false, &list);
4033 	spin_unlock_irq(&n->list_lock);
4034 
4035 	slabs_destroy(cachep, &list);
4036 }
4037 
4038 /**
4039  * cache_reap - Reclaim memory from caches.
4040  * @w: work descriptor
4041  *
4042  * Called from workqueue/eventd every few seconds.
4043  * Purpose:
4044  * - clear the per-cpu caches for this CPU.
4045  * - return freeable pages to the main free memory pool.
4046  *
4047  * If we cannot acquire the cache chain mutex then just give up - we'll try
4048  * again on the next iteration.
4049  */
4050 static void cache_reap(struct work_struct *w)
4051 {
4052 	struct kmem_cache *searchp;
4053 	struct kmem_cache_node *n;
4054 	int node = numa_mem_id();
4055 	struct delayed_work *work = to_delayed_work(w);
4056 
4057 	if (!mutex_trylock(&slab_mutex))
4058 		/* Give up. Setup the next iteration. */
4059 		goto out;
4060 
4061 	list_for_each_entry(searchp, &slab_caches, list) {
4062 		check_irq_on();
4063 
4064 		/*
4065 		 * We only take the node lock if absolutely necessary and we
4066 		 * have established with reasonable certainty that
4067 		 * we can do some work if the lock was obtained.
4068 		 */
4069 		n = get_node(searchp, node);
4070 
4071 		reap_alien(searchp, n);
4072 
4073 		drain_array(searchp, n, cpu_cache_get(searchp), node);
4074 
4075 		/*
4076 		 * These are racy checks but it does not matter
4077 		 * if we skip one check or scan twice.
4078 		 */
4079 		if (time_after(n->next_reap, jiffies))
4080 			goto next;
4081 
4082 		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4083 
4084 		drain_array(searchp, n, n->shared, node);
4085 
4086 		if (n->free_touched)
4087 			n->free_touched = 0;
4088 		else {
4089 			int freed;
4090 
4091 			freed = drain_freelist(searchp, n, (n->free_limit +
4092 				5 * searchp->num - 1) / (5 * searchp->num));
4093 			STATS_ADD_REAPED(searchp, freed);
4094 		}
4095 next:
4096 		cond_resched();
4097 	}
4098 	check_irq_on();
4099 	mutex_unlock(&slab_mutex);
4100 	next_reap_node();
4101 out:
4102 	/* Set up the next iteration */
4103 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4104 }
4105 
4106 #ifdef CONFIG_SLABINFO
4107 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4108 {
4109 	struct page *page;
4110 	unsigned long active_objs;
4111 	unsigned long num_objs;
4112 	unsigned long active_slabs = 0;
4113 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4114 	const char *name;
4115 	char *error = NULL;
4116 	int node;
4117 	struct kmem_cache_node *n;
4118 
4119 	active_objs = 0;
4120 	num_slabs = 0;
4121 	for_each_kmem_cache_node(cachep, node, n) {
4122 
4123 		check_irq_on();
4124 		spin_lock_irq(&n->list_lock);
4125 
4126 		list_for_each_entry(page, &n->slabs_full, lru) {
4127 			if (page->active != cachep->num && !error)
4128 				error = "slabs_full accounting error";
4129 			active_objs += cachep->num;
4130 			active_slabs++;
4131 		}
4132 		list_for_each_entry(page, &n->slabs_partial, lru) {
4133 			if (page->active == cachep->num && !error)
4134 				error = "slabs_partial accounting error";
4135 			if (!page->active && !error)
4136 				error = "slabs_partial accounting error";
4137 			active_objs += page->active;
4138 			active_slabs++;
4139 		}
4140 		list_for_each_entry(page, &n->slabs_free, lru) {
4141 			if (page->active && !error)
4142 				error = "slabs_free accounting error";
4143 			num_slabs++;
4144 		}
4145 		free_objects += n->free_objects;
4146 		if (n->shared)
4147 			shared_avail += n->shared->avail;
4148 
4149 		spin_unlock_irq(&n->list_lock);
4150 	}
4151 	num_slabs += active_slabs;
4152 	num_objs = num_slabs * cachep->num;
4153 	if (num_objs - active_objs != free_objects && !error)
4154 		error = "free_objects accounting error";
4155 
4156 	name = cachep->name;
4157 	if (error)
4158 		pr_err("slab: cache %s error: %s\n", name, error);
4159 
4160 	sinfo->active_objs = active_objs;
4161 	sinfo->num_objs = num_objs;
4162 	sinfo->active_slabs = active_slabs;
4163 	sinfo->num_slabs = num_slabs;
4164 	sinfo->shared_avail = shared_avail;
4165 	sinfo->limit = cachep->limit;
4166 	sinfo->batchcount = cachep->batchcount;
4167 	sinfo->shared = cachep->shared;
4168 	sinfo->objects_per_slab = cachep->num;
4169 	sinfo->cache_order = cachep->gfporder;
4170 }
4171 
4172 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4173 {
4174 #if STATS
4175 	{			/* node stats */
4176 		unsigned long high = cachep->high_mark;
4177 		unsigned long allocs = cachep->num_allocations;
4178 		unsigned long grown = cachep->grown;
4179 		unsigned long reaped = cachep->reaped;
4180 		unsigned long errors = cachep->errors;
4181 		unsigned long max_freeable = cachep->max_freeable;
4182 		unsigned long node_allocs = cachep->node_allocs;
4183 		unsigned long node_frees = cachep->node_frees;
4184 		unsigned long overflows = cachep->node_overflow;
4185 
4186 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4187 			   allocs, high, grown,
4188 			   reaped, errors, max_freeable, node_allocs,
4189 			   node_frees, overflows);
4190 	}
4191 	/* cpu stats */
4192 	{
4193 		unsigned long allochit = atomic_read(&cachep->allochit);
4194 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4195 		unsigned long freehit = atomic_read(&cachep->freehit);
4196 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4197 
4198 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4199 			   allochit, allocmiss, freehit, freemiss);
4200 	}
4201 #endif
4202 }
4203 
4204 #define MAX_SLABINFO_WRITE 128
4205 /**
4206  * slabinfo_write - Tuning for the slab allocator
4207  * @file: unused
4208  * @buffer: user buffer
4209  * @count: data length
4210  * @ppos: unused
4211  */
4212 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4213 		       size_t count, loff_t *ppos)
4214 {
4215 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4216 	int limit, batchcount, shared, res;
4217 	struct kmem_cache *cachep;
4218 
4219 	if (count > MAX_SLABINFO_WRITE)
4220 		return -EINVAL;
4221 	if (copy_from_user(&kbuf, buffer, count))
4222 		return -EFAULT;
4223 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4224 
4225 	tmp = strchr(kbuf, ' ');
4226 	if (!tmp)
4227 		return -EINVAL;
4228 	*tmp = '\0';
4229 	tmp++;
4230 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4231 		return -EINVAL;
4232 
4233 	/* Find the cache in the chain of caches. */
4234 	mutex_lock(&slab_mutex);
4235 	res = -EINVAL;
4236 	list_for_each_entry(cachep, &slab_caches, list) {
4237 		if (!strcmp(cachep->name, kbuf)) {
4238 			if (limit < 1 || batchcount < 1 ||
4239 					batchcount > limit || shared < 0) {
4240 				res = 0;
4241 			} else {
4242 				res = do_tune_cpucache(cachep, limit,
4243 						       batchcount, shared,
4244 						       GFP_KERNEL);
4245 			}
4246 			break;
4247 		}
4248 	}
4249 	mutex_unlock(&slab_mutex);
4250 	if (res >= 0)
4251 		res = count;
4252 	return res;
4253 }
4254 
4255 #ifdef CONFIG_DEBUG_SLAB_LEAK
4256 
4257 static inline int add_caller(unsigned long *n, unsigned long v)
4258 {
4259 	unsigned long *p;
4260 	int l;
4261 	if (!v)
4262 		return 1;
4263 	l = n[1];
4264 	p = n + 2;
4265 	while (l) {
4266 		int i = l/2;
4267 		unsigned long *q = p + 2 * i;
4268 		if (*q == v) {
4269 			q[1]++;
4270 			return 1;
4271 		}
4272 		if (*q > v) {
4273 			l = i;
4274 		} else {
4275 			p = q + 2;
4276 			l -= i + 1;
4277 		}
4278 	}
4279 	if (++n[1] == n[0])
4280 		return 0;
4281 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4282 	p[0] = v;
4283 	p[1] = 1;
4284 	return 1;
4285 }
4286 
4287 static void handle_slab(unsigned long *n, struct kmem_cache *c,
4288 						struct page *page)
4289 {
4290 	void *p;
4291 	int i, j;
4292 	unsigned long v;
4293 
4294 	if (n[0] == n[1])
4295 		return;
4296 	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4297 		bool active = true;
4298 
4299 		for (j = page->active; j < c->num; j++) {
4300 			if (get_free_obj(page, j) == i) {
4301 				active = false;
4302 				break;
4303 			}
4304 		}
4305 
4306 		if (!active)
4307 			continue;
4308 
4309 		/*
4310 		 * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
4311 		 * mapping is established when actual object allocation and
4312 		 * we could mistakenly access the unmapped object in the cpu
4313 		 * cache.
4314 		 */
4315 		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4316 			continue;
4317 
4318 		if (!add_caller(n, v))
4319 			return;
4320 	}
4321 }
4322 
4323 static void show_symbol(struct seq_file *m, unsigned long address)
4324 {
4325 #ifdef CONFIG_KALLSYMS
4326 	unsigned long offset, size;
4327 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4328 
4329 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4330 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4331 		if (modname[0])
4332 			seq_printf(m, " [%s]", modname);
4333 		return;
4334 	}
4335 #endif
4336 	seq_printf(m, "%p", (void *)address);
4337 }
4338 
4339 static int leaks_show(struct seq_file *m, void *p)
4340 {
4341 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4342 	struct page *page;
4343 	struct kmem_cache_node *n;
4344 	const char *name;
4345 	unsigned long *x = m->private;
4346 	int node;
4347 	int i;
4348 
4349 	if (!(cachep->flags & SLAB_STORE_USER))
4350 		return 0;
4351 	if (!(cachep->flags & SLAB_RED_ZONE))
4352 		return 0;
4353 
4354 	/*
4355 	 * Set store_user_clean and start to grab stored user information
4356 	 * for all objects on this cache. If some alloc/free requests comes
4357 	 * during the processing, information would be wrong so restart
4358 	 * whole processing.
4359 	 */
4360 	do {
4361 		set_store_user_clean(cachep);
4362 		drain_cpu_caches(cachep);
4363 
4364 		x[1] = 0;
4365 
4366 		for_each_kmem_cache_node(cachep, node, n) {
4367 
4368 			check_irq_on();
4369 			spin_lock_irq(&n->list_lock);
4370 
4371 			list_for_each_entry(page, &n->slabs_full, lru)
4372 				handle_slab(x, cachep, page);
4373 			list_for_each_entry(page, &n->slabs_partial, lru)
4374 				handle_slab(x, cachep, page);
4375 			spin_unlock_irq(&n->list_lock);
4376 		}
4377 	} while (!is_store_user_clean(cachep));
4378 
4379 	name = cachep->name;
4380 	if (x[0] == x[1]) {
4381 		/* Increase the buffer size */
4382 		mutex_unlock(&slab_mutex);
4383 		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4384 		if (!m->private) {
4385 			/* Too bad, we are really out */
4386 			m->private = x;
4387 			mutex_lock(&slab_mutex);
4388 			return -ENOMEM;
4389 		}
4390 		*(unsigned long *)m->private = x[0] * 2;
4391 		kfree(x);
4392 		mutex_lock(&slab_mutex);
4393 		/* Now make sure this entry will be retried */
4394 		m->count = m->size;
4395 		return 0;
4396 	}
4397 	for (i = 0; i < x[1]; i++) {
4398 		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4399 		show_symbol(m, x[2*i+2]);
4400 		seq_putc(m, '\n');
4401 	}
4402 
4403 	return 0;
4404 }
4405 
4406 static const struct seq_operations slabstats_op = {
4407 	.start = slab_start,
4408 	.next = slab_next,
4409 	.stop = slab_stop,
4410 	.show = leaks_show,
4411 };
4412 
4413 static int slabstats_open(struct inode *inode, struct file *file)
4414 {
4415 	unsigned long *n;
4416 
4417 	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4418 	if (!n)
4419 		return -ENOMEM;
4420 
4421 	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4422 
4423 	return 0;
4424 }
4425 
4426 static const struct file_operations proc_slabstats_operations = {
4427 	.open		= slabstats_open,
4428 	.read		= seq_read,
4429 	.llseek		= seq_lseek,
4430 	.release	= seq_release_private,
4431 };
4432 #endif
4433 
4434 static int __init slab_proc_init(void)
4435 {
4436 #ifdef CONFIG_DEBUG_SLAB_LEAK
4437 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4438 #endif
4439 	return 0;
4440 }
4441 module_init(slab_proc_init);
4442 #endif
4443 
4444 /**
4445  * ksize - get the actual amount of memory allocated for a given object
4446  * @objp: Pointer to the object
4447  *
4448  * kmalloc may internally round up allocations and return more memory
4449  * than requested. ksize() can be used to determine the actual amount of
4450  * memory allocated. The caller may use this additional memory, even though
4451  * a smaller amount of memory was initially specified with the kmalloc call.
4452  * The caller must guarantee that objp points to a valid object previously
4453  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4454  * must not be freed during the duration of the call.
4455  */
4456 size_t ksize(const void *objp)
4457 {
4458 	size_t size;
4459 
4460 	BUG_ON(!objp);
4461 	if (unlikely(objp == ZERO_SIZE_PTR))
4462 		return 0;
4463 
4464 	size = virt_to_cache(objp)->object_size;
4465 	/* We assume that ksize callers could use the whole allocated area,
4466 	 * so we need to unpoison this area.
4467 	 */
4468 	kasan_unpoison_shadow(objp, size);
4469 
4470 	return size;
4471 }
4472 EXPORT_SYMBOL(ksize);
4473