xref: /openbmc/linux/mm/slab.c (revision 7bcae826)
1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in:
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in:
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (each small, usually a single
25  * page, and always physically contiguous), and each slab contains
26  * multiple initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs;
42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array; most allocs
48  * and frees go through that array. If the array overflows, half
49  * of its entries are given back to the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with local interrupts enabled -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'slab_mutex'.
72  *	The mutex is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
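
/*
 * Illustrative sketch of the cache API described above -- an addition for
 * this writeup, not part of the original file, and compiled out via #if 0.
 * The struct and names ("foo", foo_cache, foo_example_*) are hypothetical;
 * kmem_cache_create(), kmem_cache_alloc() and kmem_cache_free() are the
 * real entry points.
 */
#if 0
struct foo {
	int a;
	int b;
};

static struct kmem_cache *foo_cache;

static int __init foo_example_init(void)
{
	/* One cache per object type, as described above. */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_example_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (!f)
		return;
	/*
	 * Objects must be returned to kmem_cache_free() in the same
	 * initialized state that a constructor would have established.
	 */
	kmem_cache_free(foo_cache, f);
}
#endif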
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/kmemleak.h>
110 #include	<linux/mempolicy.h>
111 #include	<linux/mutex.h>
112 #include	<linux/fault-inject.h>
113 #include	<linux/rtmutex.h>
114 #include	<linux/reciprocal_div.h>
115 #include	<linux/debugobjects.h>
116 #include	<linux/kmemcheck.h>
117 #include	<linux/memory.h>
118 #include	<linux/prefetch.h>
119 
120 #include	<net/sock.h>
121 
122 #include	<asm/cacheflush.h>
123 #include	<asm/tlbflush.h>
124 #include	<asm/page.h>
125 
126 #include <trace/events/kmem.h>
127 
128 #include	"internal.h"
129 
130 #include	"slab.h"
131 
132 /*
133  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
134  *		  0 for faster, smaller code (especially in the critical paths).
135  *
136  * STATS	- 1 to collect stats for /proc/slabinfo.
137  *		  0 for faster, smaller code (especially in the critical paths).
138  *
139  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
140  */
141 
142 #ifdef CONFIG_DEBUG_SLAB
143 #define	DEBUG		1
144 #define	STATS		1
145 #define	FORCED_DEBUG	1
146 #else
147 #define	DEBUG		0
148 #define	STATS		0
149 #define	FORCED_DEBUG	0
150 #endif
151 
152 /* Shouldn't this be in a header file somewhere? */
153 #define	BYTES_PER_WORD		sizeof(void *)
154 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
155 
156 #ifndef ARCH_KMALLOC_FLAGS
157 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
158 #endif
159 
160 #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
161 				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
162 
163 #if FREELIST_BYTE_INDEX
164 typedef unsigned char freelist_idx_t;
165 #else
166 typedef unsigned short freelist_idx_t;
167 #endif
168 
169 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
170 
171 /*
172  * struct array_cache
173  *
174  * Purpose:
175  * - LIFO ordering, to hand out cache-warm objects from _alloc
176  * - reduce the number of linked list operations
177  * - reduce spinlock operations
178  *
179  * The limit is stored in the per-cpu structure to reduce the data cache
180  * footprint.
181  *
182  */
183 struct array_cache {
184 	unsigned int avail;
185 	unsigned int limit;
186 	unsigned int batchcount;
187 	unsigned int touched;
188 	void *entry[];	/*
189 			 * Must have this definition in here for the proper
190 			 * alignment of array_cache. Also simplifies accessing
191 			 * the entries.
192 			 */
193 };
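
/*
 * Hedged sketch (not in the original file, compiled out) of how entry[]
 * behaves as a LIFO stack: the free path pushes the most recently used
 * object on top, and the alloc path pops it back, which is what makes the
 * handed-out objects cache-warm. See the real alloc/free paths later in
 * this file, e.g. ac->entry[ac->avail++] in __cache_free_alien().
 */
#if 0
static inline void ac_push(struct array_cache *ac, void *objp)
{
	ac->entry[ac->avail++] = objp;		/* free: push on top */
}

static inline void *ac_pop(struct array_cache *ac)
{
	ac->touched = 1;
	return ac->entry[--ac->avail];		/* alloc: pop the warm top */
}
#endif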
194 
195 struct alien_cache {
196 	spinlock_t lock;
197 	struct array_cache ac;
198 };
199 
200 /*
201  * Need this for bootstrapping a per node allocator.
202  */
203 #define NUM_INIT_LISTS (2 * MAX_NUMNODES)
204 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
205 #define	CACHE_CACHE 0
206 #define	SIZE_NODE (MAX_NUMNODES)
207 
208 static int drain_freelist(struct kmem_cache *cache,
209 			struct kmem_cache_node *n, int tofree);
210 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
211 			int node, struct list_head *list);
212 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
213 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
214 static void cache_reap(struct work_struct *unused);
215 
216 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
217 						void **list);
218 static inline void fixup_slab_list(struct kmem_cache *cachep,
219 				struct kmem_cache_node *n, struct page *page,
220 				void **list);
221 static int slab_early_init = 1;
222 
223 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
224 
225 static void kmem_cache_node_init(struct kmem_cache_node *parent)
226 {
227 	INIT_LIST_HEAD(&parent->slabs_full);
228 	INIT_LIST_HEAD(&parent->slabs_partial);
229 	INIT_LIST_HEAD(&parent->slabs_free);
230 	parent->total_slabs = 0;
231 	parent->free_slabs = 0;
232 	parent->shared = NULL;
233 	parent->alien = NULL;
234 	parent->colour_next = 0;
235 	spin_lock_init(&parent->list_lock);
236 	parent->free_objects = 0;
237 	parent->free_touched = 0;
238 }
239 
240 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
241 	do {								\
242 		INIT_LIST_HEAD(listp);					\
243 		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
244 	} while (0)
245 
246 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
247 	do {								\
248 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
249 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
250 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
251 	} while (0)
252 
253 #define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
254 #define CFLGS_OFF_SLAB		(0x80000000UL)
255 #define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
256 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
257 
258 #define BATCHREFILL_LIMIT	16
259 /*
260  * Optimization question: fewer reaps means less probability for unnecessary
261  * cpucache drain/refill cycles.
262  *
263  * OTOH the cpuarrays can contain lots of objects,
264  * which could lock up otherwise freeable slabs.
265  */
266 #define REAPTIMEOUT_AC		(2*HZ)
267 #define REAPTIMEOUT_NODE	(4*HZ)
268 
269 #if STATS
270 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
271 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
272 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
273 #define	STATS_INC_GROWN(x)	((x)->grown++)
274 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
275 #define	STATS_SET_HIGH(x)						\
276 	do {								\
277 		if ((x)->num_active > (x)->high_mark)			\
278 			(x)->high_mark = (x)->num_active;		\
279 	} while (0)
280 #define	STATS_INC_ERR(x)	((x)->errors++)
281 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
282 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
283 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
284 #define	STATS_SET_FREEABLE(x, i)					\
285 	do {								\
286 		if ((x)->max_freeable < i)				\
287 			(x)->max_freeable = i;				\
288 	} while (0)
289 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
290 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
291 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
292 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
293 #else
294 #define	STATS_INC_ACTIVE(x)	do { } while (0)
295 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
296 #define	STATS_INC_ALLOCED(x)	do { } while (0)
297 #define	STATS_INC_GROWN(x)	do { } while (0)
298 #define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
299 #define	STATS_SET_HIGH(x)	do { } while (0)
300 #define	STATS_INC_ERR(x)	do { } while (0)
301 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
302 #define	STATS_INC_NODEFREES(x)	do { } while (0)
303 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
304 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
305 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
306 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
307 #define STATS_INC_FREEHIT(x)	do { } while (0)
308 #define STATS_INC_FREEMISS(x)	do { } while (0)
309 #endif
310 
311 #if DEBUG
312 
313 /*
314  * memory layout of objects:
315  * 0		: objp
316  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
317  * 		the end of an object is aligned with the end of the real
318  * 		allocation. Catches writes behind the end of the allocation.
319  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
320  * 		redzone word.
321  * cachep->obj_offset: The real object.
322  * cachep->size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
323  * cachep->size - 1 * BYTES_PER_WORD: last caller address
324  *					[BYTES_PER_WORD long]
325  */
326 static int obj_offset(struct kmem_cache *cachep)
327 {
328 	return cachep->obj_offset;
329 }
330 
331 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
332 {
333 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
334 	return (unsigned long long*) (objp + obj_offset(cachep) -
335 				      sizeof(unsigned long long));
336 }
337 
338 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
339 {
340 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
341 	if (cachep->flags & SLAB_STORE_USER)
342 		return (unsigned long long *)(objp + cachep->size -
343 					      sizeof(unsigned long long) -
344 					      REDZONE_ALIGN);
345 	return (unsigned long long *) (objp + cachep->size -
346 				       sizeof(unsigned long long));
347 }
348 
349 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
350 {
351 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
352 	return (void **)(objp + cachep->size - BYTES_PER_WORD);
353 }
354 
355 #else
356 
357 #define obj_offset(x)			0
358 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
359 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
360 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
361 
362 #endif
363 
364 #ifdef CONFIG_DEBUG_SLAB_LEAK
365 
366 static inline bool is_store_user_clean(struct kmem_cache *cachep)
367 {
368 	return atomic_read(&cachep->store_user_clean) == 1;
369 }
370 
371 static inline void set_store_user_clean(struct kmem_cache *cachep)
372 {
373 	atomic_set(&cachep->store_user_clean, 1);
374 }
375 
376 static inline void set_store_user_dirty(struct kmem_cache *cachep)
377 {
378 	if (is_store_user_clean(cachep))
379 		atomic_set(&cachep->store_user_clean, 0);
380 }
381 
382 #else
383 static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
384 
385 #endif
386 
387 /*
388  * Do not go above this order unless no objects fit into the slab at
389  * this order, or the limit is overridden on the command line.
390  */
391 #define	SLAB_MAX_ORDER_HI	1
392 #define	SLAB_MAX_ORDER_LO	0
393 static int slab_max_order = SLAB_MAX_ORDER_LO;
394 static bool slab_max_order_set __initdata;
395 
396 static inline struct kmem_cache *virt_to_cache(const void *obj)
397 {
398 	struct page *page = virt_to_head_page(obj);
399 	return page->slab_cache;
400 }
401 
402 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
403 				 unsigned int idx)
404 {
405 	return page->s_mem + cache->size * idx;
406 }
407 
408 /*
409  * We want to avoid an expensive divide : (offset / cache->size)
410  *   Using the fact that size is a constant for a particular cache,
411  *   we can replace (offset / cache->size) by
412  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
413  */
414 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
415 					const struct page *page, void *obj)
416 {
417 	u32 offset = (obj - page->s_mem);
418 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
419 }
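
/*
 * For illustration only (an addition, compiled out): the plain division
 * that obj_to_index() avoids. Both forms yield the same index, assuming
 * reciprocal_buffer_size was precomputed from cache->size with
 * reciprocal_value() when the cache was set up.
 */
#if 0
static inline unsigned int obj_to_index_slow(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);

	return offset / cache->size;	/* the divide we want to avoid */
}
#endif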
420 
421 #define BOOT_CPUCACHE_ENTRIES	1
422 /* internal cache of cache description objs */
423 static struct kmem_cache kmem_cache_boot = {
424 	.batchcount = 1,
425 	.limit = BOOT_CPUCACHE_ENTRIES,
426 	.shared = 1,
427 	.size = sizeof(struct kmem_cache),
428 	.name = "kmem_cache",
429 };
430 
431 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
432 
433 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
434 {
435 	return this_cpu_ptr(cachep->cpu_cache);
436 }
437 
438 /*
439  * Calculate the number of objects and left-over bytes for a given buffer size.
440  */
441 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
442 		unsigned long flags, size_t *left_over)
443 {
444 	unsigned int num;
445 	size_t slab_size = PAGE_SIZE << gfporder;
446 
447 	/*
448 	 * The slab management structure can be either off the slab or
449 	 * on it. For the latter case, the memory allocated for a
450 	 * slab is used for:
451 	 *
452 	 * - @buffer_size bytes for each object
453 	 * - One freelist_idx_t for each object
454 	 *
455 	 * We don't need to consider alignment of freelist because
456 	 * freelist will be at the end of slab page. The objects will be
457 	 * at the correct alignment.
458 	 *
459 	 * If the slab management structure is off the slab, then the
460 	 * alignment will already be calculated into the size. Because
461 	 * the slabs are all pages aligned, the objects will be at the
462 	 * correct alignment when allocated.
463 	 */
464 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
465 		num = slab_size / buffer_size;
466 		*left_over = slab_size % buffer_size;
467 	} else {
468 		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
469 		*left_over = slab_size %
470 			(buffer_size + sizeof(freelist_idx_t));
471 	}
472 
473 	return num;
474 }
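
/*
 * Worked example (assuming 4K pages and a one-byte freelist_idx_t): for
 * gfporder = 0 and buffer_size = 256 with an on-slab freelist, each object
 * costs 256 + 1 = 257 bytes, so num = 4096 / 257 = 15 objects and
 * *left_over = 4096 - 15 * 257 = 241 bytes.
 */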
475 
476 #if DEBUG
477 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
478 
479 static void __slab_error(const char *function, struct kmem_cache *cachep,
480 			char *msg)
481 {
482 	pr_err("slab error in %s(): cache `%s': %s\n",
483 	       function, cachep->name, msg);
484 	dump_stack();
485 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
486 }
487 #endif
488 
489 /*
490  * By default on NUMA we use alien caches to stage the freeing of
491  * objects allocated from other nodes. This causes massive memory
492  * inefficiencies when using a fake NUMA setup to split memory into a
493  * large number of small nodes, so it can be disabled on the command
494  * line.
495  */
496 
497 static int use_alien_caches __read_mostly = 1;
498 static int __init noaliencache_setup(char *s)
499 {
500 	use_alien_caches = 0;
501 	return 1;
502 }
503 __setup("noaliencache", noaliencache_setup);
504 
505 static int __init slab_max_order_setup(char *str)
506 {
507 	get_option(&str, &slab_max_order);
508 	slab_max_order = slab_max_order < 0 ? 0 :
509 				min(slab_max_order, MAX_ORDER - 1);
510 	slab_max_order_set = true;
511 
512 	return 1;
513 }
514 __setup("slab_max_order=", slab_max_order_setup);
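
/*
 * Usage example (hypothetical values): booting with "slab_max_order=2"
 * allows order-2 (four-page) slabs; negative values are clamped to 0 and
 * values above MAX_ORDER - 1 are clamped down by the handler above.
 */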
515 
516 #ifdef CONFIG_NUMA
517 /*
518  * Special reaping functions for NUMA systems called from cache_reap().
519  * These take care of doing round robin flushing of alien caches (containing
520  * objects freed on different nodes from which they were allocated) and the
521  * flushing of remote pcps by calling drain_node_pages.
522  */
523 static DEFINE_PER_CPU(unsigned long, slab_reap_node);
524 
525 static void init_reap_node(int cpu)
526 {
527 	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
528 						    node_online_map);
529 }
530 
531 static void next_reap_node(void)
532 {
533 	int node = __this_cpu_read(slab_reap_node);
534 
535 	node = next_node_in(node, node_online_map);
536 	__this_cpu_write(slab_reap_node, node);
537 }
538 
539 #else
540 #define init_reap_node(cpu) do { } while (0)
541 #define next_reap_node(void) do { } while (0)
542 #endif
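
/*
 * Example of the round-robin walk above (hypothetical topology): with
 * nodes {0, 2, 3} online and slab_reap_node currently at 3, next_node_in()
 * wraps around, so the next call to next_reap_node() selects node 0.
 */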
543 
544 /*
545  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
546  * via the workqueue/eventd.
547  * Add the CPU number into the expiration time to minimize the possibility of
548  * the CPUs getting into lockstep and contending for the global cache chain
549  * lock.
550  */
551 static void start_cpu_timer(int cpu)
552 {
553 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
554 
555 	if (reap_work->work.func == NULL) {
556 		init_reap_node(cpu);
557 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
558 		schedule_delayed_work_on(cpu, reap_work,
559 					__round_jiffies_relative(HZ, cpu));
560 	}
561 }
562 
563 static void init_arraycache(struct array_cache *ac, int limit, int batch)
564 {
565 	/*
566 	 * The array_cache structures contain pointers to free objects.
567 	 * However, when such objects are allocated or transferred to another
568 	 * cache the pointers are not cleared and they could be counted as
569 	 * valid references during a kmemleak scan. Therefore, kmemleak must
570 	 * not scan such objects.
571 	 */
572 	if (ac) {
573 		kmemleak_no_scan(ac);
574 		ac->avail = 0;
575 		ac->limit = limit;
576 		ac->batchcount = batch;
577 		ac->touched = 0;
578 	}
579 }
580 
581 static struct array_cache *alloc_arraycache(int node, int entries,
582 					    int batchcount, gfp_t gfp)
583 {
584 	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
585 	struct array_cache *ac = NULL;
586 
587 	ac = kmalloc_node(memsize, gfp, node);
588 	init_arraycache(ac, entries, batchcount);
589 	return ac;
590 }
591 
592 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
593 					struct page *page, void *objp)
594 {
595 	struct kmem_cache_node *n;
596 	int page_node;
597 	LIST_HEAD(list);
598 
599 	page_node = page_to_nid(page);
600 	n = get_node(cachep, page_node);
601 
602 	spin_lock(&n->list_lock);
603 	free_block(cachep, &objp, 1, page_node, &list);
604 	spin_unlock(&n->list_lock);
605 
606 	slabs_destroy(cachep, &list);
607 }
608 
609 /*
610  * Transfer objects from one arraycache to another.
611  * Locking must be handled by the caller.
612  *
613  * Return the number of entries transferred.
614  */
615 static int transfer_objects(struct array_cache *to,
616 		struct array_cache *from, unsigned int max)
617 {
618 	/* Figure out how many entries to transfer */
619 	int nr = min3(from->avail, max, to->limit - to->avail);
620 
621 	if (!nr)
622 		return 0;
623 
624 	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
625 			sizeof(void *) * nr);
626 
627 	from->avail -= nr;
628 	to->avail += nr;
629 	return nr;
630 }
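
/*
 * Worked example (hypothetical numbers): with from->avail = 30, max = 64
 * and to->limit - to->avail = 20, nr = min3(30, 64, 20) = 20, so the top
 * 20 entries of @from are appended to @to.
 */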
631 
632 #ifndef CONFIG_NUMA
633 
634 #define drain_alien_cache(cachep, alien) do { } while (0)
635 #define reap_alien(cachep, n) do { } while (0)
636 
637 static inline struct alien_cache **alloc_alien_cache(int node,
638 						int limit, gfp_t gfp)
639 {
640 	return NULL;
641 }
642 
643 static inline void free_alien_cache(struct alien_cache **ac_ptr)
644 {
645 }
646 
647 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
648 {
649 	return 0;
650 }
651 
652 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
653 		gfp_t flags)
654 {
655 	return NULL;
656 }
657 
658 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
659 		 gfp_t flags, int nodeid)
660 {
661 	return NULL;
662 }
663 
664 static inline gfp_t gfp_exact_node(gfp_t flags)
665 {
666 	return flags & ~__GFP_NOFAIL;
667 }
668 
669 #else	/* CONFIG_NUMA */
670 
671 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
672 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
673 
674 static struct alien_cache *__alloc_alien_cache(int node, int entries,
675 						int batch, gfp_t gfp)
676 {
677 	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
678 	struct alien_cache *alc = NULL;
679 
680 	alc = kmalloc_node(memsize, gfp, node);
681 	if (alc) {
682 		init_arraycache(&alc->ac, entries, batch);
683 		spin_lock_init(&alc->lock);
684 	}
685 	return alc;
686 }
685 
686 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
687 {
688 	struct alien_cache **alc_ptr;
689 	size_t memsize = sizeof(void *) * nr_node_ids;
690 	int i;
691 
692 	if (limit > 1)
693 		limit = 12;
694 	alc_ptr = kzalloc_node(memsize, gfp, node);
695 	if (!alc_ptr)
696 		return NULL;
697 
698 	for_each_node(i) {
699 		if (i == node || !node_online(i))
700 			continue;
701 		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
702 		if (!alc_ptr[i]) {
703 			for (i--; i >= 0; i--)
704 				kfree(alc_ptr[i]);
705 			kfree(alc_ptr);
706 			return NULL;
707 		}
708 	}
709 	return alc_ptr;
710 }
711 
712 static void free_alien_cache(struct alien_cache **alc_ptr)
713 {
714 	int i;
715 
716 	if (!alc_ptr)
717 		return;
718 	for_each_node(i)
719 	    kfree(alc_ptr[i]);
720 	kfree(alc_ptr);
721 }
722 
723 static void __drain_alien_cache(struct kmem_cache *cachep,
724 				struct array_cache *ac, int node,
725 				struct list_head *list)
726 {
727 	struct kmem_cache_node *n = get_node(cachep, node);
728 
729 	if (ac->avail) {
730 		spin_lock(&n->list_lock);
731 		/*
732 		 * Stuff objects into the remote node's shared array first.
733 		 * That way we could avoid the overhead of putting the objects
734 		 * into the free lists and getting them back later.
735 		 */
736 		if (n->shared)
737 			transfer_objects(n->shared, ac, ac->limit);
738 
739 		free_block(cachep, ac->entry, ac->avail, node, list);
740 		ac->avail = 0;
741 		spin_unlock(&n->list_lock);
742 	}
743 }
744 
745 /*
746  * Called from cache_reap() to regularly drain alien caches round robin.
747  */
748 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
749 {
750 	int node = __this_cpu_read(slab_reap_node);
751 
752 	if (n->alien) {
753 		struct alien_cache *alc = n->alien[node];
754 		struct array_cache *ac;
755 
756 		if (alc) {
757 			ac = &alc->ac;
758 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
759 				LIST_HEAD(list);
760 
761 				__drain_alien_cache(cachep, ac, node, &list);
762 				spin_unlock_irq(&alc->lock);
763 				slabs_destroy(cachep, &list);
764 			}
765 		}
766 	}
767 }
768 
769 static void drain_alien_cache(struct kmem_cache *cachep,
770 				struct alien_cache **alien)
771 {
772 	int i = 0;
773 	struct alien_cache *alc;
774 	struct array_cache *ac;
775 	unsigned long flags;
776 
777 	for_each_online_node(i) {
778 		alc = alien[i];
779 		if (alc) {
780 			LIST_HEAD(list);
781 
782 			ac = &alc->ac;
783 			spin_lock_irqsave(&alc->lock, flags);
784 			__drain_alien_cache(cachep, ac, i, &list);
785 			spin_unlock_irqrestore(&alc->lock, flags);
786 			slabs_destroy(cachep, &list);
787 		}
788 	}
789 }
790 
791 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
792 				int node, int page_node)
793 {
794 	struct kmem_cache_node *n;
795 	struct alien_cache *alien = NULL;
796 	struct array_cache *ac;
797 	LIST_HEAD(list);
798 
799 	n = get_node(cachep, node);
800 	STATS_INC_NODEFREES(cachep);
801 	if (n->alien && n->alien[page_node]) {
802 		alien = n->alien[page_node];
803 		ac = &alien->ac;
804 		spin_lock(&alien->lock);
805 		if (unlikely(ac->avail == ac->limit)) {
806 			STATS_INC_ACOVERFLOW(cachep);
807 			__drain_alien_cache(cachep, ac, page_node, &list);
808 		}
809 		ac->entry[ac->avail++] = objp;
810 		spin_unlock(&alien->lock);
811 		slabs_destroy(cachep, &list);
812 	} else {
813 		n = get_node(cachep, page_node);
814 		spin_lock(&n->list_lock);
815 		free_block(cachep, &objp, 1, page_node, &list);
816 		spin_unlock(&n->list_lock);
817 		slabs_destroy(cachep, &list);
818 	}
819 	return 1;
820 }
821 
822 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
823 {
824 	int page_node = page_to_nid(virt_to_page(objp));
825 	int node = numa_mem_id();
826 	/*
827 	 * Make sure we are not freeing an object from another node to the array
828 	 * cache on this cpu.
829 	 */
830 	if (likely(node == page_node))
831 		return 0;
832 
833 	return __cache_free_alien(cachep, objp, node, page_node);
834 }
835 
836 /*
837  * Construct gfp mask to allocate from a specific node but do not reclaim or
838  * warn about failures.
839  */
840 static inline gfp_t gfp_exact_node(gfp_t flags)
841 {
842 	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
843 }
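
/*
 * Example of the mask transformation above: GFP_KERNEL, which is
 * __GFP_RECLAIM | __GFP_IO | __GFP_FS, becomes
 * __GFP_IO | __GFP_FS | __GFP_THISNODE | __GFP_NOWARN -- pinned to the
 * node, silent on failure, and never entering reclaim.
 */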
844 #endif
845 
846 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
847 {
848 	struct kmem_cache_node *n;
849 
850 	/*
851 	 * Set up the kmem_cache_node for cpu before we can
852 	 * begin anything. Make sure some other cpu on this
853 	 * node has not already allocated it.
854 	 */
855 	n = get_node(cachep, node);
856 	if (n) {
857 		spin_lock_irq(&n->list_lock);
858 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
859 				cachep->num;
860 		spin_unlock_irq(&n->list_lock);
861 
862 		return 0;
863 	}
864 
865 	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
866 	if (!n)
867 		return -ENOMEM;
868 
869 	kmem_cache_node_init(n);
870 	n->next_reap = jiffies + REAPTIMEOUT_NODE +
871 		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
872 
873 	n->free_limit =
874 		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
875 
876 	/*
877 	 * The kmem_cache_nodes don't come and go as CPUs
878 	 * come and go.  slab_mutex is sufficient
879 	 * protection here.
880 	 */
881 	cachep->node[node] = n;
882 
883 	return 0;
884 }
885 
886 #if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
887 /*
888  * Allocates and initializes a kmem_cache_node for a node on each slab cache,
889  * used for either memory or cpu hotplug.  If memory is being hot-added, the
890  * kmem_cache_node will be allocated off-node since memory is not yet online
891  * for the new node.  When hotplugging memory or a cpu, existing nodes are not
892  * replaced if already in use.
893  *
894  * Must hold slab_mutex.
895  */
896 static int init_cache_node_node(int node)
897 {
898 	int ret;
899 	struct kmem_cache *cachep;
900 
901 	list_for_each_entry(cachep, &slab_caches, list) {
902 		ret = init_cache_node(cachep, node, GFP_KERNEL);
903 		if (ret)
904 			return ret;
905 	}
906 
907 	return 0;
908 }
909 #endif
910 
911 static int setup_kmem_cache_node(struct kmem_cache *cachep,
912 				int node, gfp_t gfp, bool force_change)
913 {
914 	int ret = -ENOMEM;
915 	struct kmem_cache_node *n;
916 	struct array_cache *old_shared = NULL;
917 	struct array_cache *new_shared = NULL;
918 	struct alien_cache **new_alien = NULL;
919 	LIST_HEAD(list);
920 
921 	if (use_alien_caches) {
922 		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
923 		if (!new_alien)
924 			goto fail;
925 	}
926 
927 	if (cachep->shared) {
928 		new_shared = alloc_arraycache(node,
929 			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
930 		if (!new_shared)
931 			goto fail;
932 	}
933 
934 	ret = init_cache_node(cachep, node, gfp);
935 	if (ret)
936 		goto fail;
937 
938 	n = get_node(cachep, node);
939 	spin_lock_irq(&n->list_lock);
940 	if (n->shared && force_change) {
941 		free_block(cachep, n->shared->entry,
942 				n->shared->avail, node, &list);
943 		n->shared->avail = 0;
944 	}
945 
946 	if (!n->shared || force_change) {
947 		old_shared = n->shared;
948 		n->shared = new_shared;
949 		new_shared = NULL;
950 	}
951 
952 	if (!n->alien) {
953 		n->alien = new_alien;
954 		new_alien = NULL;
955 	}
956 
957 	spin_unlock_irq(&n->list_lock);
958 	slabs_destroy(cachep, &list);
959 
960 	/*
961 	 * This protects lockless access to n->shared in irq-disabled context.
962 	 * If n->shared isn't NULL in irq-disabled context, accessing it is
963 	 * guaranteed to be valid until irq is re-enabled, because it will be
964 	 * freed after synchronize_sched().
965 	 */
966 	if (old_shared && force_change)
967 		synchronize_sched();
968 
969 fail:
970 	kfree(old_shared);
971 	kfree(new_shared);
972 	free_alien_cache(new_alien);
973 
974 	return ret;
975 }
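
/*
 * Reader-side pairing for the synchronize_sched() above (explanatory
 * note): code that peeks at n->shared does so with local irqs disabled,
 * which is an implicit sched-RCU read-side section. The old shared array
 * is kfree()d only after synchronize_sched() returns, i.e. after every
 * such section in flight has finished.
 */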
976 
977 #ifdef CONFIG_SMP
978 
979 static void cpuup_canceled(long cpu)
980 {
981 	struct kmem_cache *cachep;
982 	struct kmem_cache_node *n = NULL;
983 	int node = cpu_to_mem(cpu);
984 	const struct cpumask *mask = cpumask_of_node(node);
985 
986 	list_for_each_entry(cachep, &slab_caches, list) {
987 		struct array_cache *nc;
988 		struct array_cache *shared;
989 		struct alien_cache **alien;
990 		LIST_HEAD(list);
991 
992 		n = get_node(cachep, node);
993 		if (!n)
994 			continue;
995 
996 		spin_lock_irq(&n->list_lock);
997 
998 		/* Free limit for this kmem_cache_node */
999 		n->free_limit -= cachep->batchcount;
1000 
1001 		/* cpu is dead; no one can alloc from it. */
1002 		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1003 		if (nc) {
1004 			free_block(cachep, nc->entry, nc->avail, node, &list);
1005 			nc->avail = 0;
1006 		}
1007 
1008 		if (!cpumask_empty(mask)) {
1009 			spin_unlock_irq(&n->list_lock);
1010 			goto free_slab;
1011 		}
1012 
1013 		shared = n->shared;
1014 		if (shared) {
1015 			free_block(cachep, shared->entry,
1016 				   shared->avail, node, &list);
1017 			n->shared = NULL;
1018 		}
1019 
1020 		alien = n->alien;
1021 		n->alien = NULL;
1022 
1023 		spin_unlock_irq(&n->list_lock);
1024 
1025 		kfree(shared);
1026 		if (alien) {
1027 			drain_alien_cache(cachep, alien);
1028 			free_alien_cache(alien);
1029 		}
1030 
1031 free_slab:
1032 		slabs_destroy(cachep, &list);
1033 	}
1034 	/*
1035 	 * In the previous loop, all the objects were freed to
1036 	 * the respective cache's slabs; now we can go ahead and
1037 	 * shrink each nodelist to its limit.
1038 	 */
1039 	list_for_each_entry(cachep, &slab_caches, list) {
1040 		n = get_node(cachep, node);
1041 		if (!n)
1042 			continue;
1043 		drain_freelist(cachep, n, INT_MAX);
1044 	}
1045 }
1046 
1047 static int cpuup_prepare(long cpu)
1048 {
1049 	struct kmem_cache *cachep;
1050 	int node = cpu_to_mem(cpu);
1051 	int err;
1052 
1053 	/*
1054 	 * We need to do this right at the beginning since
1055 	 * the alloc_arraycache() calls are going to use this list.
1056 	 * kmalloc_node allows us to add the slab to the right
1057 	 * kmem_cache_node and not this cpu's kmem_cache_node.
1058 	 */
1059 	err = init_cache_node_node(node);
1060 	if (err < 0)
1061 		goto bad;
1062 
1063 	/*
1064 	 * Now we can go ahead with allocating the shared arrays and
1065 	 * array caches
1066 	 */
1067 	list_for_each_entry(cachep, &slab_caches, list) {
1068 		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1069 		if (err)
1070 			goto bad;
1071 	}
1072 
1073 	return 0;
1074 bad:
1075 	cpuup_canceled(cpu);
1076 	return -ENOMEM;
1077 }
1078 
1079 int slab_prepare_cpu(unsigned int cpu)
1080 {
1081 	int err;
1082 
1083 	mutex_lock(&slab_mutex);
1084 	err = cpuup_prepare(cpu);
1085 	mutex_unlock(&slab_mutex);
1086 	return err;
1087 }
1088 
1089 /*
1090  * This is called for a failed online attempt and for a successful
1091  * offline.
1092  *
1093  * Even if all the cpus of a node are down, we don't free the
1094  * kmem_cache_node of any cache. This is to avoid a race between cpu_down and
1095  * a kmalloc allocation from another cpu for memory from the node of
1096  * the cpu going down.  The kmem_cache_node structure is usually allocated from
1097  * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1098  */
1099 int slab_dead_cpu(unsigned int cpu)
1100 {
1101 	mutex_lock(&slab_mutex);
1102 	cpuup_canceled(cpu);
1103 	mutex_unlock(&slab_mutex);
1104 	return 0;
1105 }
1106 #endif
1107 
1108 static int slab_online_cpu(unsigned int cpu)
1109 {
1110 	start_cpu_timer(cpu);
1111 	return 0;
1112 }
1113 
1114 static int slab_offline_cpu(unsigned int cpu)
1115 {
1116 	/*
1117 	 * Shutdown cache reaper. Note that the slab_mutex is held so
1118 	 * that if cache_reap() is invoked it cannot do anything
1119 	 * expensive but will only modify reap_work and reschedule the
1120 	 * timer.
1121 	 */
1122 	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1123 	/* Now the cache_reaper is guaranteed to be not running. */
1124 	per_cpu(slab_reap_work, cpu).work.func = NULL;
1125 	return 0;
1126 }
1127 
1128 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1129 /*
1130  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1131  * Returns -EBUSY if not all objects can be drained, so that the node is not
1132  * removed.
1133  *
1134  * Must hold slab_mutex.
1135  */
1136 static int __meminit drain_cache_node_node(int node)
1137 {
1138 	struct kmem_cache *cachep;
1139 	int ret = 0;
1140 
1141 	list_for_each_entry(cachep, &slab_caches, list) {
1142 		struct kmem_cache_node *n;
1143 
1144 		n = get_node(cachep, node);
1145 		if (!n)
1146 			continue;
1147 
1148 		drain_freelist(cachep, n, INT_MAX);
1149 
1150 		if (!list_empty(&n->slabs_full) ||
1151 		    !list_empty(&n->slabs_partial)) {
1152 			ret = -EBUSY;
1153 			break;
1154 		}
1155 	}
1156 	return ret;
1157 }
1158 
1159 static int __meminit slab_memory_callback(struct notifier_block *self,
1160 					unsigned long action, void *arg)
1161 {
1162 	struct memory_notify *mnb = arg;
1163 	int ret = 0;
1164 	int nid;
1165 
1166 	nid = mnb->status_change_nid;
1167 	if (nid < 0)
1168 		goto out;
1169 
1170 	switch (action) {
1171 	case MEM_GOING_ONLINE:
1172 		mutex_lock(&slab_mutex);
1173 		ret = init_cache_node_node(nid);
1174 		mutex_unlock(&slab_mutex);
1175 		break;
1176 	case MEM_GOING_OFFLINE:
1177 		mutex_lock(&slab_mutex);
1178 		ret = drain_cache_node_node(nid);
1179 		mutex_unlock(&slab_mutex);
1180 		break;
1181 	case MEM_ONLINE:
1182 	case MEM_OFFLINE:
1183 	case MEM_CANCEL_ONLINE:
1184 	case MEM_CANCEL_OFFLINE:
1185 		break;
1186 	}
1187 out:
1188 	return notifier_from_errno(ret);
1189 }
1190 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1191 
1192 /*
1193  * swap the static kmem_cache_node with kmalloced memory
1194  */
1195 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1196 				int nodeid)
1197 {
1198 	struct kmem_cache_node *ptr;
1199 
1200 	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1201 	BUG_ON(!ptr);
1202 
1203 	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1204 	/*
1205 	 * Do not assume that spinlocks can be initialized via memcpy:
1206 	 */
1207 	spin_lock_init(&ptr->list_lock);
1208 
1209 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1210 	cachep->node[nodeid] = ptr;
1211 }
1212 
1213 /*
1214  * For setting up all the kmem_cache_node structures for a cache whose
1215  * buffer_size is the same as the size of struct kmem_cache_node.
1216  */
1217 static void __init set_up_node(struct kmem_cache *cachep, int index)
1218 {
1219 	int node;
1220 
1221 	for_each_online_node(node) {
1222 		cachep->node[node] = &init_kmem_cache_node[index + node];
1223 		cachep->node[node]->next_reap = jiffies +
1224 		    REAPTIMEOUT_NODE +
1225 		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1226 	}
1227 }
1228 
1229 /*
1230  * Initialisation.  Called after the page allocator has been initialised and
1231  * before smp_init().
1232  */
1233 void __init kmem_cache_init(void)
1234 {
1235 	int i;
1236 
1237 	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1238 					sizeof(struct rcu_head));
1239 	kmem_cache = &kmem_cache_boot;
1240 
1241 	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1242 		use_alien_caches = 0;
1243 
1244 	for (i = 0; i < NUM_INIT_LISTS; i++)
1245 		kmem_cache_node_init(&init_kmem_cache_node[i]);
1246 
1247 	/*
1248 	 * Fragmentation resistance on low memory - only use bigger
1249 	 * page orders on machines with more than 32MB of memory if
1250 	 * not overridden on the command line.
1251 	 */
1252 	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1253 		slab_max_order = SLAB_MAX_ORDER_HI;
1254 
1255 	/* Bootstrap is tricky, because several objects are allocated
1256 	 * from caches that do not exist yet:
1257 	 * 1) initialize the kmem_cache cache: it contains the struct
1258 	 *    kmem_cache structures of all caches, except kmem_cache itself:
1259 	 *    kmem_cache is statically allocated.
1260 	 *    Initially an __init data area is used for the head array and the
1261 	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1262 	 *    array at the end of the bootstrap.
1263 	 * 2) Create the first kmalloc cache.
1264 	 *    The struct kmem_cache for the new cache is allocated normally.
1265 	 *    An __init data area is used for the head array.
1266 	 * 3) Create the remaining kmalloc caches, with minimally sized
1267 	 *    head arrays.
1268 	 * 4) Replace the __init data head arrays for kmem_cache and the first
1269 	 *    kmalloc cache with kmalloc allocated arrays.
1270 	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1271  *    the other caches with kmalloc allocated memory.
1272 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1273 	 */
1274 
1275 	/* 1) create the kmem_cache */
1276 
1277 	/*
1278 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1279 	 */
1280 	create_boot_cache(kmem_cache, "kmem_cache",
1281 		offsetof(struct kmem_cache, node) +
1282 				  nr_node_ids * sizeof(struct kmem_cache_node *),
1283 				  SLAB_HWCACHE_ALIGN);
1284 	list_add(&kmem_cache->list, &slab_caches);
1285 	slab_state = PARTIAL;
1286 
1287 	/*
1288 	 * Initialize the caches that provide memory for the kmem_cache_node
1289 	 * structures first.  Without this, further allocations will bug.
1290 	 */
1291 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
1292 				kmalloc_info[INDEX_NODE].name,
1293 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1294 	slab_state = PARTIAL_NODE;
1295 	setup_kmalloc_cache_index_table();
1296 
1297 	slab_early_init = 0;
1298 
1299 	/* 5) Replace the bootstrap kmem_cache_node */
1300 	{
1301 		int nid;
1302 
1303 		for_each_online_node(nid) {
1304 			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1305 
1306 			init_list(kmalloc_caches[INDEX_NODE],
1307 					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1308 		}
1309 	}
1310 
1311 	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1312 }
1313 
1314 void __init kmem_cache_init_late(void)
1315 {
1316 	struct kmem_cache *cachep;
1317 
1318 	slab_state = UP;
1319 
1320 	/* 6) resize the head arrays to their final sizes */
1321 	mutex_lock(&slab_mutex);
1322 	list_for_each_entry(cachep, &slab_caches, list)
1323 		if (enable_cpucache(cachep, GFP_NOWAIT))
1324 			BUG();
1325 	mutex_unlock(&slab_mutex);
1326 
1327 	/* Done! */
1328 	slab_state = FULL;
1329 
1330 #ifdef CONFIG_NUMA
1331 	/*
1332 	 * Register a memory hotplug callback that initializes and frees
1333 	 * kmem_cache_node structures.
1334 	 */
1335 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1336 #endif
1337 
1338 	/*
1339 	 * The reap timers are started later, with a module init call: That part
1340 	 * of the kernel is not yet operational.
1341 	 */
1342 }
1343 
1344 static int __init cpucache_init(void)
1345 {
1346 	int ret;
1347 
1348 	/*
1349 	 * Register the timers that return unneeded pages to the page allocator
1350 	 * Register the timers that return unneeded pages to the page allocator.
1351 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1352 				slab_online_cpu, slab_offline_cpu);
1353 	WARN_ON(ret < 0);
1354 
1355 	/* Done! */
1356 	slab_state = FULL;
1357 	return 0;
1358 }
1359 __initcall(cpucache_init);
1360 
1361 static noinline void
1362 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1363 {
1364 #if DEBUG
1365 	struct kmem_cache_node *n;
1366 	unsigned long flags;
1367 	int node;
1368 	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1369 				      DEFAULT_RATELIMIT_BURST);
1370 
1371 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1372 		return;
1373 
1374 	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1375 		nodeid, gfpflags, &gfpflags);
1376 	pr_warn("  cache: %s, object size: %d, order: %d\n",
1377 		cachep->name, cachep->size, cachep->gfporder);
1378 
1379 	for_each_kmem_cache_node(cachep, node, n) {
1380 		unsigned long total_slabs, free_slabs, free_objs;
1381 
1382 		spin_lock_irqsave(&n->list_lock, flags);
1383 		total_slabs = n->total_slabs;
1384 		free_slabs = n->free_slabs;
1385 		free_objs = n->free_objects;
1386 		spin_unlock_irqrestore(&n->list_lock, flags);
1387 
1388 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1389 			node, total_slabs - free_slabs, total_slabs,
1390 			(total_slabs * cachep->num) - free_objs,
1391 			total_slabs * cachep->num);
1392 	}
1393 #endif
1394 }
1395 
1396 /*
1397  * Interface to system's page allocator. No need to hold the
1398  * kmem_cache_node ->list_lock.
1399  *
1400  * If we requested dmaable memory, we will get it. Even if we
1401  * did not request dmaable memory, we might get it, but that
1402  * would be relatively rare and ignorable.
1403  */
1404 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1405 								int nodeid)
1406 {
1407 	struct page *page;
1408 	int nr_pages;
1409 
1410 	flags |= cachep->allocflags;
1411 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1412 		flags |= __GFP_RECLAIMABLE;
1413 
1414 	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1415 	if (!page) {
1416 		slab_out_of_memory(cachep, flags, nodeid);
1417 		return NULL;
1418 	}
1419 
1420 	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1421 		__free_pages(page, cachep->gfporder);
1422 		return NULL;
1423 	}
1424 
1425 	nr_pages = (1 << cachep->gfporder);
1426 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1427 		add_zone_page_state(page_zone(page),
1428 			NR_SLAB_RECLAIMABLE, nr_pages);
1429 	else
1430 		add_zone_page_state(page_zone(page),
1431 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1432 
1433 	__SetPageSlab(page);
1434 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1435 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1436 		SetPageSlabPfmemalloc(page);
1437 
1438 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1439 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1440 
1441 		if (cachep->ctor)
1442 			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1443 		else
1444 			kmemcheck_mark_unallocated_pages(page, nr_pages);
1445 	}
1446 
1447 	return page;
1448 }
1449 
1450 /*
1451  * Interface to system's page release.
1452  */
1453 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1454 {
1455 	int order = cachep->gfporder;
1456 	unsigned long nr_freed = (1 << order);
1457 
1458 	kmemcheck_free_shadow(page, order);
1459 
1460 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1461 		sub_zone_page_state(page_zone(page),
1462 				NR_SLAB_RECLAIMABLE, nr_freed);
1463 	else
1464 		sub_zone_page_state(page_zone(page),
1465 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1466 
1467 	BUG_ON(!PageSlab(page));
1468 	__ClearPageSlabPfmemalloc(page);
1469 	__ClearPageSlab(page);
1470 	page_mapcount_reset(page);
1471 	page->mapping = NULL;
1472 
1473 	if (current->reclaim_state)
1474 		current->reclaim_state->reclaimed_slab += nr_freed;
1475 	memcg_uncharge_slab(page, order, cachep);
1476 	__free_pages(page, order);
1477 }
1478 
1479 static void kmem_rcu_free(struct rcu_head *head)
1480 {
1481 	struct kmem_cache *cachep;
1482 	struct page *page;
1483 
1484 	page = container_of(head, struct page, rcu_head);
1485 	cachep = page->slab_cache;
1486 
1487 	kmem_freepages(cachep, page);
1488 }
1489 
1490 #if DEBUG
1491 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1492 {
1493 	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1494 		(cachep->size % PAGE_SIZE) == 0)
1495 		return true;
1496 
1497 	return false;
1498 }
1499 
1500 #ifdef CONFIG_DEBUG_PAGEALLOC
1501 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1502 			    unsigned long caller)
1503 {
1504 	int size = cachep->object_size;
1505 
1506 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1507 
1508 	if (size < 5 * sizeof(unsigned long))
1509 		return;
1510 
1511 	*addr++ = 0x12345678;
1512 	*addr++ = caller;
1513 	*addr++ = smp_processor_id();
1514 	size -= 3 * sizeof(unsigned long);
1515 	{
1516 		unsigned long *sptr = &caller;
1517 		unsigned long svalue;
1518 
1519 		while (!kstack_end(sptr)) {
1520 			svalue = *sptr++;
1521 			if (kernel_text_address(svalue)) {
1522 				*addr++ = svalue;
1523 				size -= sizeof(unsigned long);
1524 				if (size <= sizeof(unsigned long))
1525 					break;
1526 			}
1527 		}
1528 
1529 	}
1530 	*addr++ = 0x87654321;
1531 }
1532 
1533 static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1534 				int map, unsigned long caller)
1535 {
1536 	if (!is_debug_pagealloc_cache(cachep))
1537 		return;
1538 
1539 	if (caller)
1540 		store_stackinfo(cachep, objp, caller);
1541 
1542 	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1543 }
1544 
1545 #else
1546 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1547 				int map, unsigned long caller) {}
1548 
1549 #endif
1550 
1551 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1552 {
1553 	int size = cachep->object_size;
1554 	addr = &((char *)addr)[obj_offset(cachep)];
1555 
1556 	memset(addr, val, size);
1557 	*(unsigned char *)(addr + size - 1) = POISON_END;
1558 }
1559 
1560 static void dump_line(char *data, int offset, int limit)
1561 {
1562 	int i;
1563 	unsigned char error = 0;
1564 	int bad_count = 0;
1565 
1566 	pr_err("%03x: ", offset);
1567 	for (i = 0; i < limit; i++) {
1568 		if (data[offset + i] != POISON_FREE) {
1569 			error = data[offset + i];
1570 			bad_count++;
1571 		}
1572 	}
1573 	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1574 			&data[offset], limit, 1);
1575 
1576 	if (bad_count == 1) {
1577 		error ^= POISON_FREE;
1578 		if (!(error & (error - 1))) {
1579 			pr_err("Single bit error detected. Probably bad RAM.\n");
1580 #ifdef CONFIG_X86
1581 			pr_err("Run memtest86+ or a similar memory test tool.\n");
1582 #else
1583 			pr_err("Run a memory test tool.\n");
1584 #endif
1585 		}
1586 	}
1587 }
1588 #endif
1589 
1590 #if DEBUG
1591 
1592 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1593 {
1594 	int i, size;
1595 	char *realobj;
1596 
1597 	if (cachep->flags & SLAB_RED_ZONE) {
1598 		pr_err("Redzone: 0x%llx/0x%llx\n",
1599 		       *dbg_redzone1(cachep, objp),
1600 		       *dbg_redzone2(cachep, objp));
1601 	}
1602 
1603 	if (cachep->flags & SLAB_STORE_USER) {
1604 		pr_err("Last user: [<%p>](%pSR)\n",
1605 		       *dbg_userword(cachep, objp),
1606 		       *dbg_userword(cachep, objp));
1607 	}
1608 	realobj = (char *)objp + obj_offset(cachep);
1609 	size = cachep->object_size;
1610 	for (i = 0; i < size && lines; i += 16, lines--) {
1611 		int limit;
1612 		limit = 16;
1613 		if (i + limit > size)
1614 			limit = size - i;
1615 		dump_line(realobj, i, limit);
1616 	}
1617 }
1618 
1619 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1620 {
1621 	char *realobj;
1622 	int size, i;
1623 	int lines = 0;
1624 
1625 	if (is_debug_pagealloc_cache(cachep))
1626 		return;
1627 
1628 	realobj = (char *)objp + obj_offset(cachep);
1629 	size = cachep->object_size;
1630 
1631 	for (i = 0; i < size; i++) {
1632 		char exp = POISON_FREE;
1633 		if (i == size - 1)
1634 			exp = POISON_END;
1635 		if (realobj[i] != exp) {
1636 			int limit;
1637 			/* Mismatch ! */
1638 			/* Print header */
1639 			if (lines == 0) {
1640 				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
1641 				       print_tainted(), cachep->name,
1642 				       realobj, size);
1643 				print_objinfo(cachep, objp, 0);
1644 			}
1645 			/* Hexdump the affected line */
1646 			i = (i / 16) * 16;
1647 			limit = 16;
1648 			if (i + limit > size)
1649 				limit = size - i;
1650 			dump_line(realobj, i, limit);
1651 			i += 16;
1652 			lines++;
1653 			/* Limit to 5 lines */
1654 			if (lines > 5)
1655 				break;
1656 		}
1657 	}
1658 	if (lines != 0) {
1659 		/* Print some data about the neighboring objects, if they
1660 		 * exist:
1661 		 */
1662 		struct page *page = virt_to_head_page(objp);
1663 		unsigned int objnr;
1664 
1665 		objnr = obj_to_index(cachep, page, objp);
1666 		if (objnr) {
1667 			objp = index_to_obj(cachep, page, objnr - 1);
1668 			realobj = (char *)objp + obj_offset(cachep);
1669 			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
1670 			print_objinfo(cachep, objp, 2);
1671 		}
1672 		if (objnr + 1 < cachep->num) {
1673 			objp = index_to_obj(cachep, page, objnr + 1);
1674 			realobj = (char *)objp + obj_offset(cachep);
1675 			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
1676 			print_objinfo(cachep, objp, 2);
1677 		}
1678 	}
1679 }
1680 #endif
1681 
1682 #if DEBUG
1683 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1684 						struct page *page)
1685 {
1686 	int i;
1687 
1688 	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1689 		poison_obj(cachep, page->freelist - obj_offset(cachep),
1690 			POISON_FREE);
1691 	}
1692 
1693 	for (i = 0; i < cachep->num; i++) {
1694 		void *objp = index_to_obj(cachep, page, i);
1695 
1696 		if (cachep->flags & SLAB_POISON) {
1697 			check_poison_obj(cachep, objp);
1698 			slab_kernel_map(cachep, objp, 1, 0);
1699 		}
1700 		if (cachep->flags & SLAB_RED_ZONE) {
1701 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1702 				slab_error(cachep, "start of a freed object was overwritten");
1703 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1704 				slab_error(cachep, "end of a freed object was overwritten");
1705 		}
1706 	}
1707 }
1708 #else
1709 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1710 						struct page *page)
1711 {
1712 }
1713 #endif
1714 
1715 /**
1716  * slab_destroy - destroy and release all objects in a slab
1717  * @cachep: cache pointer being destroyed
1718  * @page: page pointer being destroyed
1719  *
1720  * Destroy all the objs in a slab page, and release the mem back to the system.
1721  * Before calling this, the slab page must have been unlinked from the cache.
1722  * The kmem_cache_node ->list_lock is not held/needed.
1723  */
1724 static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1725 {
1726 	void *freelist;
1727 
1728 	freelist = page->freelist;
1729 	slab_destroy_debugcheck(cachep, page);
1730 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1731 		call_rcu(&page->rcu_head, kmem_rcu_free);
1732 	else
1733 		kmem_freepages(cachep, page);
1734 
1735 	/*
1736 	 * From now on, we don't use freelist
1737 	 * although the actual page can be freed in rcu context.
1738 	 */
1739 	if (OFF_SLAB(cachep))
1740 		kmem_cache_free(cachep->freelist_cache, freelist);
1741 }
1742 
1743 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1744 {
1745 	struct page *page, *n;
1746 
1747 	list_for_each_entry_safe(page, n, list, lru) {
1748 		list_del(&page->lru);
1749 		slab_destroy(cachep, page);
1750 	}
1751 }
1752 
1753 /**
1754  * calculate_slab_order - calculate size (page order) of slabs
1755  * @cachep: pointer to the cache that is being created
1756  * @size: size of objects to be created in this cache.
1757  * @flags: slab allocation flags
1758  *
1759  * Also calculates the number of objects per slab.
1760  *
1761  * This could be made much more intelligent.  For now, try to avoid using
1762  * high order pages for slabs.  When the gfp() functions are more friendly
1763  * towards high-order requests, this should be changed.
1764  */
1765 static size_t calculate_slab_order(struct kmem_cache *cachep,
1766 				size_t size, unsigned long flags)
1767 {
1768 	size_t left_over = 0;
1769 	int gfporder;
1770 
1771 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1772 		unsigned int num;
1773 		size_t remainder;
1774 
1775 		num = cache_estimate(gfporder, size, flags, &remainder);
1776 		if (!num)
1777 			continue;
1778 
1779 		/* Can't handle more objects than SLAB_OBJ_MAX_NUM */
1780 		if (num > SLAB_OBJ_MAX_NUM)
1781 			break;
1782 
1783 		if (flags & CFLGS_OFF_SLAB) {
1784 			struct kmem_cache *freelist_cache;
1785 			size_t freelist_size;
1786 
1787 			freelist_size = num * sizeof(freelist_idx_t);
1788 			freelist_cache = kmalloc_slab(freelist_size, 0u);
1789 			if (!freelist_cache)
1790 				continue;
1791 
1792 			/*
1793 			 * Needed to avoid possible looping condition
1794 			 * in cache_grow_begin()
1795 			 */
1796 			if (OFF_SLAB(freelist_cache))
1797 				continue;
1798 
1799 			/* check if off slab has enough benefit */
1800 			if (freelist_cache->size > cachep->size / 2)
1801 				continue;
1802 		}
1803 
1804 		/* Found something acceptable - save it away */
1805 		cachep->num = num;
1806 		cachep->gfporder = gfporder;
1807 		left_over = remainder;
1808 
1809 		/*
1810 		 * A VFS-reclaimable slab tends to have most allocations
1811 		 * as GFP_NOFS and we really don't want to have to be allocating
1812 		 * higher-order pages when we are unable to shrink dcache.
1813 		 */
1814 		if (flags & SLAB_RECLAIM_ACCOUNT)
1815 			break;
1816 
1817 		/*
1818 		 * A large number of objects is good, but very large slabs are
1819 		 * currently bad for the gfp()s.
1820 		 */
1821 		if (gfporder >= slab_max_order)
1822 			break;
1823 
1824 		/*
1825 		 * Acceptable internal fragmentation?
1826 		 */
1827 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1828 			break;
1829 	}
1830 	return left_over;
1831 }
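
/*
 * Worked example for the fragmentation check above (assuming 4K pages):
 * at gfporder = 0, left_over = 241 gives 241 * 8 = 1928 <= 4096, i.e.
 * less than 1/8 of the slab wasted, so the order is accepted.
 */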
1832 
1833 static struct array_cache __percpu *alloc_kmem_cache_cpus(
1834 		struct kmem_cache *cachep, int entries, int batchcount)
1835 {
1836 	int cpu;
1837 	size_t size;
1838 	struct array_cache __percpu *cpu_cache;
1839 
1840 	size = sizeof(void *) * entries + sizeof(struct array_cache);
1841 	cpu_cache = __alloc_percpu(size, sizeof(void *));
1842 
1843 	if (!cpu_cache)
1844 		return NULL;
1845 
1846 	for_each_possible_cpu(cpu) {
1847 		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1848 				entries, batchcount);
1849 	}
1850 
1851 	return cpu_cache;
1852 }
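
/*
 * Sizing sketch (illustrative): for entries == 120 on a 64-bit system,
 * each per-cpu array costs sizeof(struct array_cache) plus 120 pointers
 * (120 * 8 bytes), i.e. roughly 1 KiB per possible cpu.
 */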
1853 
1854 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1855 {
1856 	if (slab_state >= FULL)
1857 		return enable_cpucache(cachep, gfp);
1858 
1859 	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1860 	if (!cachep->cpu_cache)
1861 		return 1;
1862 
1863 	if (slab_state == DOWN) {
1864 		/* Creation of first cache (kmem_cache). */
1865 		set_up_node(kmem_cache, CACHE_CACHE);
1866 	} else if (slab_state == PARTIAL) {
1867 		/* For kmem_cache_node */
1868 		set_up_node(cachep, SIZE_NODE);
1869 	} else {
1870 		int node;
1871 
1872 		for_each_online_node(node) {
1873 			cachep->node[node] = kmalloc_node(
1874 				sizeof(struct kmem_cache_node), gfp, node);
1875 			BUG_ON(!cachep->node[node]);
1876 			kmem_cache_node_init(cachep->node[node]);
1877 		}
1878 	}
1879 
1880 	cachep->node[numa_mem_id()]->next_reap =
1881 			jiffies + REAPTIMEOUT_NODE +
1882 			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1883 
1884 	cpu_cache_get(cachep)->avail = 0;
1885 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1886 	cpu_cache_get(cachep)->batchcount = 1;
1887 	cpu_cache_get(cachep)->touched = 0;
1888 	cachep->batchcount = 1;
1889 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1890 	return 0;
1891 }
1892 
1893 unsigned long kmem_cache_flags(unsigned long object_size,
1894 	unsigned long flags, const char *name,
1895 	void (*ctor)(void *))
1896 {
1897 	return flags;
1898 }
1899 
1900 struct kmem_cache *
1901 __kmem_cache_alias(const char *name, size_t size, size_t align,
1902 		   unsigned long flags, void (*ctor)(void *))
1903 {
1904 	struct kmem_cache *cachep;
1905 
1906 	cachep = find_mergeable(size, align, flags, name, ctor);
1907 	if (cachep) {
1908 		cachep->refcount++;
1909 
1910 		/*
1911 		 * Adjust the object sizes so that we clear
1912 		 * the complete object on kzalloc.
1913 		 */
1914 		cachep->object_size = max_t(int, cachep->object_size, size);
1915 	}
1916 	return cachep;
1917 }
1918 
1919 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1920 			size_t size, unsigned long flags)
1921 {
1922 	size_t left;
1923 
1924 	cachep->num = 0;
1925 
1926 	if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
1927 		return false;
1928 
1929 	left = calculate_slab_order(cachep, size,
1930 			flags | CFLGS_OBJFREELIST_SLAB);
1931 	if (!cachep->num)
1932 		return false;
1933 
1934 	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1935 		return false;
1936 
1937 	cachep->colour = left / cachep->colour_off;
1938 
1939 	return true;
1940 }
1941 
1942 static bool set_off_slab_cache(struct kmem_cache *cachep,
1943 			size_t size, unsigned long flags)
1944 {
1945 	size_t left;
1946 
1947 	cachep->num = 0;
1948 
1949 	/*
1950 	 * Always use on-slab management when SLAB_NOLEAKTRACE
1951 	 * to avoid recursive calls into kmemleak.
1952 	 */
1953 	if (flags & SLAB_NOLEAKTRACE)
1954 		return false;
1955 
1956 	/*
1957 	 * Size is large, assume best to place the slab management obj
1958 	 * off-slab (should allow better packing of objs).
1959 	 */
1960 	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1961 	if (!cachep->num)
1962 		return false;
1963 
1964 	/*
1965 	 * If the slab has been placed off-slab, and we have enough space then
1966 	 * move it on-slab. This is at the expense of any extra colouring.
1967 	 */
1968 	if (left >= cachep->num * sizeof(freelist_idx_t))
1969 		return false;
1970 
1971 	cachep->colour = left / cachep->colour_off;
1972 
1973 	return true;
1974 }
1975 
1976 static bool set_on_slab_cache(struct kmem_cache *cachep,
1977 			size_t size, unsigned long flags)
1978 {
1979 	size_t left;
1980 
1981 	cachep->num = 0;
1982 
1983 	left = calculate_slab_order(cachep, size, flags);
1984 	if (!cachep->num)
1985 		return false;
1986 
1987 	cachep->colour = left / cachep->colour_off;
1988 
1989 	return true;
1990 }
1991 
1992 /**
1993  * __kmem_cache_create - Create a cache.
1994  * @cachep: cache management descriptor
1995  * @flags: SLAB flags
1996  *
1997  * Returns zero on success, nonzero on failure.
1998  * Cannot be called within an interrupt, but can be interrupted.
1999  * The @ctor is run when new pages are allocated by the cache.
2000  *
2001  * The flags are
2002  *
2003  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2004  * to catch references to uninitialised memory.
2005  *
2006  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2007  * for buffer overruns.
2008  *
2009  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2010  * cacheline.  This can be beneficial if you're counting cycles as closely
2011  * as davem.
2012  */
2013 int
2014 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2015 {
2016 	size_t ralign = BYTES_PER_WORD;
2017 	gfp_t gfp;
2018 	int err;
2019 	size_t size = cachep->size;
2020 
2021 #if DEBUG
2022 #if FORCED_DEBUG
2023 	/*
2024 	 * Enable redzoning and last user accounting, except for caches with
2025 	 * large objects, if the increased size would increase the object size
2026 	 * above the next power of two: caches with object sizes just above a
2027 	 * power of two have a significant amount of internal fragmentation.
2028 	 */
2029 	if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN +
2030 						2 * sizeof(unsigned long long)))
2031 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2032 	if (!(flags & SLAB_DESTROY_BY_RCU))
2033 		flags |= SLAB_POISON;
2034 #endif
2035 #endif
2036 
2037 	/*
2038 	 * Check that size is in terms of words.  This is needed to avoid
2039 	 * unaligned accesses for some archs when redzoning is used, and makes
2040 	 * sure any on-slab bufctl's are also correctly aligned.
2041 	 */
2042 	if (size & (BYTES_PER_WORD - 1)) {
2043 		size += (BYTES_PER_WORD - 1);
2044 		size &= ~(BYTES_PER_WORD - 1);
2045 	}
2046 
2047 	if (flags & SLAB_RED_ZONE) {
2048 		ralign = REDZONE_ALIGN;
2049 		/* If redzoning, ensure that the second redzone is suitably
2050 		 * aligned, by adjusting the object size accordingly. */
2051 		size += REDZONE_ALIGN - 1;
2052 		size &= ~(REDZONE_ALIGN - 1);
2053 	}
2054 
2055 	/* 3) caller mandated alignment */
2056 	if (ralign < cachep->align) {
2057 		ralign = cachep->align;
2058 	}
2059 	/* disable debug if necessary */
2060 	if (ralign > __alignof__(unsigned long long))
2061 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2062 	/*
2063 	 * 4) Store it.
2064 	 */
2065 	cachep->align = ralign;
2066 	cachep->colour_off = cache_line_size();
2067 	/* Offset must be a multiple of the alignment. */
2068 	if (cachep->colour_off < cachep->align)
2069 		cachep->colour_off = cachep->align;
2070 
2071 	if (slab_is_available())
2072 		gfp = GFP_KERNEL;
2073 	else
2074 		gfp = GFP_NOWAIT;
2075 
2076 #if DEBUG
2077 
2078 	/*
2079 	 * Both debugging options require word-alignment which is calculated
2080 	 * into align above.
2081 	 */
2082 	if (flags & SLAB_RED_ZONE) {
2083 		/* add space for red zone words */
2084 		cachep->obj_offset += sizeof(unsigned long long);
2085 		size += 2 * sizeof(unsigned long long);
2086 	}
2087 	if (flags & SLAB_STORE_USER) {
2088 		/* user store requires one word storage behind the end of
2089 		 * the real object. But if the second red zone needs to be
2090 		 * aligned to 64 bits, we must allow that much space.
2091 		 */
2092 		if (flags & SLAB_RED_ZONE)
2093 			size += REDZONE_ALIGN;
2094 		else
2095 			size += BYTES_PER_WORD;
2096 	}
2097 #endif
2098 
2099 	kasan_cache_create(cachep, &size, &flags);
2100 
2101 	size = ALIGN(size, cachep->align);
2102 	/*
2103 	 * We should restrict the number of objects in a slab to implement
2104 	 * a byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2105 	 */
2106 	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2107 		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2108 
2109 #if DEBUG
2110 	/*
2111 	 * To activate debug pagealloc, off-slab management is a necessary
2112 	 * requirement. In the early phase of initialization, the small sized
2113 	 * slabs don't get initialized yet, so off-slab management would not
2114 	 * be possible. Hence we check size >= 256: it guarantees that all the
2115 	 * necessary small sized slabs are initialized in the current slab
2116 	 * initialization sequence.
2116 	 */
2117 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2118 		size >= 256 && cachep->object_size > cache_line_size()) {
2119 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2120 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2121 
2122 			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2123 				flags |= CFLGS_OFF_SLAB;
2124 				cachep->obj_offset += tmp_size - size;
2125 				size = tmp_size;
2126 				goto done;
2127 			}
2128 		}
2129 	}
2130 #endif
2131 
2132 	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2133 		flags |= CFLGS_OBJFREELIST_SLAB;
2134 		goto done;
2135 	}
2136 
2137 	if (set_off_slab_cache(cachep, size, flags)) {
2138 		flags |= CFLGS_OFF_SLAB;
2139 		goto done;
2140 	}
2141 
2142 	if (set_on_slab_cache(cachep, size, flags))
2143 		goto done;
2144 
2145 	return -E2BIG;
2146 
2147 done:
2148 	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2149 	cachep->flags = flags;
2150 	cachep->allocflags = __GFP_COMP;
2151 	if (flags & SLAB_CACHE_DMA)
2152 		cachep->allocflags |= GFP_DMA;
2153 	cachep->size = size;
2154 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2155 
2156 #if DEBUG
2157 	/*
2158 	 * If we're going to use the generic kernel_map_pages()
2159 	 * poisoning, then it's going to smash the contents of
2160 	 * the redzone and userword anyhow, so switch them off.
2161 	 */
2162 	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2163 		(cachep->flags & SLAB_POISON) &&
2164 		is_debug_pagealloc_cache(cachep))
2165 		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2166 #endif
2167 
2168 	if (OFF_SLAB(cachep)) {
2169 		cachep->freelist_cache =
2170 			kmalloc_slab(cachep->freelist_size, 0u);
2171 	}
2172 
2173 	err = setup_cpu_cache(cachep, gfp);
2174 	if (err) {
2175 		__kmem_cache_release(cachep);
2176 		return err;
2177 	}
2178 
2179 	return 0;
2180 }
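
/*
 * Usage sketch (illustrative; 'struct foo' and 'foo_cache' are
 * hypothetical): callers normally reach this function through
 * kmem_cache_create():
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */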
2181 
2182 #if DEBUG
2183 static void check_irq_off(void)
2184 {
2185 	BUG_ON(!irqs_disabled());
2186 }
2187 
2188 static void check_irq_on(void)
2189 {
2190 	BUG_ON(irqs_disabled());
2191 }
2192 
2193 static void check_mutex_acquired(void)
2194 {
2195 	BUG_ON(!mutex_is_locked(&slab_mutex));
2196 }
2197 
2198 static void check_spinlock_acquired(struct kmem_cache *cachep)
2199 {
2200 #ifdef CONFIG_SMP
2201 	check_irq_off();
2202 	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2203 #endif
2204 }
2205 
2206 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2207 {
2208 #ifdef CONFIG_SMP
2209 	check_irq_off();
2210 	assert_spin_locked(&get_node(cachep, node)->list_lock);
2211 #endif
2212 }
2213 
2214 #else
2215 #define check_irq_off()	do { } while(0)
2216 #define check_irq_on()	do { } while(0)
2217 #define check_mutex_acquired()	do { } while(0)
2218 #define check_spinlock_acquired(x) do { } while(0)
2219 #define check_spinlock_acquired_node(x, y) do { } while(0)
2220 #endif
2221 
2222 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2223 				int node, bool free_all, struct list_head *list)
2224 {
2225 	int tofree;
2226 
2227 	if (!ac || !ac->avail)
2228 		return;
2229 
2230 	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2231 	if (tofree > ac->avail)
2232 		tofree = (ac->avail + 1) / 2;
2233 
2234 	free_block(cachep, ac->entry, tofree, node, list);
2235 	ac->avail -= tofree;
2236 	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2237 }
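
/*
 * Numeric sketch (illustrative): with ac->limit == 120 and free_all
 * false, tofree == (120 + 4) / 5 == 24, i.e. roughly a fifth of the
 * array is drained per pass; if fewer than 24 objects are available,
 * at most about half of what remains is freed.
 */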
2238 
2239 static void do_drain(void *arg)
2240 {
2241 	struct kmem_cache *cachep = arg;
2242 	struct array_cache *ac;
2243 	int node = numa_mem_id();
2244 	struct kmem_cache_node *n;
2245 	LIST_HEAD(list);
2246 
2247 	check_irq_off();
2248 	ac = cpu_cache_get(cachep);
2249 	n = get_node(cachep, node);
2250 	spin_lock(&n->list_lock);
2251 	free_block(cachep, ac->entry, ac->avail, node, &list);
2252 	spin_unlock(&n->list_lock);
2253 	slabs_destroy(cachep, &list);
2254 	ac->avail = 0;
2255 }
2256 
2257 static void drain_cpu_caches(struct kmem_cache *cachep)
2258 {
2259 	struct kmem_cache_node *n;
2260 	int node;
2261 	LIST_HEAD(list);
2262 
2263 	on_each_cpu(do_drain, cachep, 1);
2264 	check_irq_on();
2265 	for_each_kmem_cache_node(cachep, node, n)
2266 		if (n->alien)
2267 			drain_alien_cache(cachep, n->alien);
2268 
2269 	for_each_kmem_cache_node(cachep, node, n) {
2270 		spin_lock_irq(&n->list_lock);
2271 		drain_array_locked(cachep, n->shared, node, true, &list);
2272 		spin_unlock_irq(&n->list_lock);
2273 
2274 		slabs_destroy(cachep, &list);
2275 	}
2276 }
2277 
2278 /*
2279  * Remove slabs from the list of free slabs.
2280  * Specify the number of slabs to drain in tofree.
2281  *
2282  * Returns the actual number of slabs released.
2283  */
2284 static int drain_freelist(struct kmem_cache *cache,
2285 			struct kmem_cache_node *n, int tofree)
2286 {
2287 	struct list_head *p;
2288 	int nr_freed;
2289 	struct page *page;
2290 
2291 	nr_freed = 0;
2292 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2293 
2294 		spin_lock_irq(&n->list_lock);
2295 		p = n->slabs_free.prev;
2296 		if (p == &n->slabs_free) {
2297 			spin_unlock_irq(&n->list_lock);
2298 			goto out;
2299 		}
2300 
2301 		page = list_entry(p, struct page, lru);
2302 		list_del(&page->lru);
2303 		n->free_slabs--;
2304 		n->total_slabs--;
2305 		/*
2306 		 * Safe to drop the lock. The slab is no longer linked
2307 		 * to the cache.
2308 		 */
2309 		n->free_objects -= cache->num;
2310 		spin_unlock_irq(&n->list_lock);
2311 		slab_destroy(cache, page);
2312 		nr_freed++;
2313 	}
2314 out:
2315 	return nr_freed;
2316 }
2317 
2318 int __kmem_cache_shrink(struct kmem_cache *cachep)
2319 {
2320 	int ret = 0;
2321 	int node;
2322 	struct kmem_cache_node *n;
2323 
2324 	drain_cpu_caches(cachep);
2325 
2326 	check_irq_on();
2327 	for_each_kmem_cache_node(cachep, node, n) {
2328 		drain_freelist(cachep, n, INT_MAX);
2329 
2330 		ret += !list_empty(&n->slabs_full) ||
2331 			!list_empty(&n->slabs_partial);
2332 	}
2333 	return (ret ? 1 : 0);
2334 }
2335 
2336 #ifdef CONFIG_MEMCG
2337 void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2338 {
2339 	__kmem_cache_shrink(cachep);
2340 }
2341 #endif
2342 
2343 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2344 {
2345 	return __kmem_cache_shrink(cachep);
2346 }
2347 
2348 void __kmem_cache_release(struct kmem_cache *cachep)
2349 {
2350 	int i;
2351 	struct kmem_cache_node *n;
2352 
2353 	cache_random_seq_destroy(cachep);
2354 
2355 	free_percpu(cachep->cpu_cache);
2356 
2357 	/* NUMA: free the node structures */
2358 	for_each_kmem_cache_node(cachep, i, n) {
2359 		kfree(n->shared);
2360 		free_alien_cache(n->alien);
2361 		kfree(n);
2362 		cachep->node[i] = NULL;
2363 	}
2364 }
2365 
2366 /*
2367  * Get the memory for a slab management obj.
2368  *
2369  * For a slab cache, when the slab descriptor is off-slab, the
2370  * slab descriptor can't come from the same cache which is being created,
2371  * because if that were the case, it would mean we defer the creation of
2372  * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2373  * We would then eventually call down to __kmem_cache_create(), which
2374  * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2375  * This is a "chicken-and-egg" problem.
2376  *
2377  * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2378  * which are all initialized during kmem_cache_init().
2379  */
2380 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2381 				   struct page *page, int colour_off,
2382 				   gfp_t local_flags, int nodeid)
2383 {
2384 	void *freelist;
2385 	void *addr = page_address(page);
2386 
2387 	page->s_mem = addr + colour_off;
2388 	page->active = 0;
2389 
2390 	if (OBJFREELIST_SLAB(cachep))
2391 		freelist = NULL;
2392 	else if (OFF_SLAB(cachep)) {
2393 		/* Slab management obj is off-slab. */
2394 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2395 					      local_flags, nodeid);
2396 		if (!freelist)
2397 			return NULL;
2398 	} else {
2399 		/* We will use the last bytes of the slab for the freelist */
2400 		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2401 				cachep->freelist_size;
2402 	}
2403 
2404 	return freelist;
2405 }
2406 
2407 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2408 {
2409 	return ((freelist_idx_t *)page->freelist)[idx];
2410 }
2411 
2412 static inline void set_free_obj(struct page *page,
2413 					unsigned int idx, freelist_idx_t val)
2414 {
2415 	((freelist_idx_t *)(page->freelist))[idx] = val;
2416 }
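
/*
 * Layout sketch (illustrative): page->freelist is an array of object
 * indices.  Entries [0 .. active-1] correspond to allocated objects,
 * while entries [active .. num-1] name the still-free objects, so
 * slab_get_obj() below simply reads index 'active' and increments it.
 */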
2417 
2418 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2419 {
2420 #if DEBUG
2421 	int i;
2422 
2423 	for (i = 0; i < cachep->num; i++) {
2424 		void *objp = index_to_obj(cachep, page, i);
2425 
2426 		if (cachep->flags & SLAB_STORE_USER)
2427 			*dbg_userword(cachep, objp) = NULL;
2428 
2429 		if (cachep->flags & SLAB_RED_ZONE) {
2430 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2431 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2432 		}
2433 		/*
2434 		 * Constructors are not allowed to allocate memory from the same
2435 		 * cache which they are a constructor for.  Otherwise, deadlock.
2436 		 * They must also be threaded.
2437 		 */
2438 		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2439 			kasan_unpoison_object_data(cachep,
2440 						   objp + obj_offset(cachep));
2441 			cachep->ctor(objp + obj_offset(cachep));
2442 			kasan_poison_object_data(
2443 				cachep, objp + obj_offset(cachep));
2444 		}
2445 
2446 		if (cachep->flags & SLAB_RED_ZONE) {
2447 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2448 				slab_error(cachep, "constructor overwrote the end of an object");
2449 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2450 				slab_error(cachep, "constructor overwrote the start of an object");
2451 		}
2452 		/* need to poison the objs? */
2453 		if (cachep->flags & SLAB_POISON) {
2454 			poison_obj(cachep, objp, POISON_FREE);
2455 			slab_kernel_map(cachep, objp, 0, 0);
2456 		}
2457 	}
2458 #endif
2459 }
2460 
2461 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2462 /* Hold information during a freelist initialization */
2463 union freelist_init_state {
2464 	struct {
2465 		unsigned int pos;
2466 		unsigned int *list;
2467 		unsigned int count;
2468 	};
2469 	struct rnd_state rnd_state;
2470 };
2471 
2472 /*
2473  * Initialize the state based on the randomization method available.
2474  * Return true if the pre-computed list is available, false otherwise.
2475  */
2476 static bool freelist_state_initialize(union freelist_init_state *state,
2477 				struct kmem_cache *cachep,
2478 				unsigned int count)
2479 {
2480 	bool ret;
2481 	unsigned int rand;
2482 
2483 	/* Use best entropy available to define a random shift */
2484 	rand = get_random_int();
2485 
2486 	/* Use a random state if the pre-computed list is not available */
2487 	if (!cachep->random_seq) {
2488 		prandom_seed_state(&state->rnd_state, rand);
2489 		ret = false;
2490 	} else {
2491 		state->list = cachep->random_seq;
2492 		state->count = count;
2493 		state->pos = rand % count;
2494 		ret = true;
2495 	}
2496 	return ret;
2497 }
2498 
2499 /* Get the next entry on the list and randomize it using a random shift */
2500 static freelist_idx_t next_random_slot(union freelist_init_state *state)
2501 {
2502 	if (state->pos >= state->count)
2503 		state->pos = 0;
2504 	return state->list[state->pos++];
2505 }
2506 
2507 /* Swap two freelist entries */
2508 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2509 {
2510 	swap(((freelist_idx_t *)page->freelist)[a],
2511 		((freelist_idx_t *)page->freelist)[b]);
2512 }
2513 
2514 /*
2515  * Shuffle the freelist initialization state based on pre-computed lists.
2516  * return true if the list was successfully shuffled, false otherwise.
2517  */
2518 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2519 {
2520 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2521 	union freelist_init_state state;
2522 	bool precomputed;
2523 
2524 	if (count < 2)
2525 		return false;
2526 
2527 	precomputed = freelist_state_initialize(&state, cachep, count);
2528 
2529 	/* Take a random entry as the objfreelist */
2530 	if (OBJFREELIST_SLAB(cachep)) {
2531 		if (!precomputed)
2532 			objfreelist = count - 1;
2533 		else
2534 			objfreelist = next_random_slot(&state);
2535 		page->freelist = index_to_obj(cachep, page, objfreelist) +
2536 						obj_offset(cachep);
2537 		count--;
2538 	}
2539 
2540 	/*
2541 	 * During early boot, generate the list dynamically.
2542 	 * Later, use a pre-computed list for speed.
2543 	 */
2544 	if (!precomputed) {
2545 		for (i = 0; i < count; i++)
2546 			set_free_obj(page, i, i);
2547 
2548 		/* Fisher-Yates shuffle */
2549 		for (i = count - 1; i > 0; i--) {
2550 			rand = prandom_u32_state(&state.rnd_state);
2551 			rand %= (i + 1);
2552 			swap_free_obj(page, i, rand);
2553 		}
2554 	} else {
2555 		for (i = 0; i < count; i++)
2556 			set_free_obj(page, i, next_random_slot(&state));
2557 	}
2558 
2559 	if (OBJFREELIST_SLAB(cachep))
2560 		set_free_obj(page, cachep->num - 1, objfreelist);
2561 
2562 	return true;
2563 }
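
/*
 * Shuffle sketch (illustrative): the Fisher-Yates pass above turns the
 * identity list 0,1,2,3 into a uniform random permutation, e.g.
 * swap(i == 3, rand == 1) -> 0,3,2,1; swap(i == 2, rand == 0) -> 2,3,0,1;
 * swap(i == 1, rand == 1) -> unchanged (a self-swap is a no-op).
 */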
2564 #else
2565 static inline bool shuffle_freelist(struct kmem_cache *cachep,
2566 				struct page *page)
2567 {
2568 	return false;
2569 }
2570 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
2571 
2572 static void cache_init_objs(struct kmem_cache *cachep,
2573 			    struct page *page)
2574 {
2575 	int i;
2576 	void *objp;
2577 	bool shuffled;
2578 
2579 	cache_init_objs_debug(cachep, page);
2580 
2581 	/* Try to randomize the freelist if enabled */
2582 	shuffled = shuffle_freelist(cachep, page);
2583 
2584 	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2585 		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2586 						obj_offset(cachep);
2587 	}
2588 
2589 	for (i = 0; i < cachep->num; i++) {
2590 		objp = index_to_obj(cachep, page, i);
2591 		kasan_init_slab_obj(cachep, objp);
2592 
2593 		/* constructor could break poison info */
2594 		if (DEBUG == 0 && cachep->ctor) {
2595 			kasan_unpoison_object_data(cachep, objp);
2596 			cachep->ctor(objp);
2597 			kasan_poison_object_data(cachep, objp);
2598 		}
2599 
2600 		if (!shuffled)
2601 			set_free_obj(page, i, i);
2602 	}
2603 }
2604 
2605 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2606 {
2607 	void *objp;
2608 
2609 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2610 	page->active++;
2611 
2612 #if DEBUG
2613 	if (cachep->flags & SLAB_STORE_USER)
2614 		set_store_user_dirty(cachep);
2615 #endif
2616 
2617 	return objp;
2618 }
2619 
2620 static void slab_put_obj(struct kmem_cache *cachep,
2621 			struct page *page, void *objp)
2622 {
2623 	unsigned int objnr = obj_to_index(cachep, page, objp);
2624 #if DEBUG
2625 	unsigned int i;
2626 
2627 	/* Verify double free bug */
2628 	for (i = page->active; i < cachep->num; i++) {
2629 		if (get_free_obj(page, i) == objnr) {
2630 			pr_err("slab: double free detected in cache '%s', objp %p\n",
2631 			       cachep->name, objp);
2632 			BUG();
2633 		}
2634 	}
2635 #endif
2636 	page->active--;
2637 	if (!page->freelist)
2638 		page->freelist = objp + obj_offset(cachep);
2639 
2640 	set_free_obj(page, page->active, objnr);
2641 }
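
/*
 * Free-order sketch (illustrative): freeing object 5 on a slab with
 * page->active == 3 decrements active to 2 and stores 5 at freelist[2],
 * so the next slab_get_obj() hands object 5 straight back - the in-slab
 * freelist is LIFO.
 */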
2642 
2643 /*
2644  * Map pages beginning at addr to the given cache and slab. This is required
2645  * for the slab allocator to be able to lookup the cache and slab of a
2646  * virtual address for kfree, ksize, and slab debugging.
2647  */
2648 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2649 			   void *freelist)
2650 {
2651 	page->slab_cache = cache;
2652 	page->freelist = freelist;
2653 }
2654 
2655 /*
2656  * Grow (by 1) the number of slabs within a cache.  This is called by
2657  * kmem_cache_alloc() when there are no active objs left in a cache.
2658  */
2659 static struct page *cache_grow_begin(struct kmem_cache *cachep,
2660 				gfp_t flags, int nodeid)
2661 {
2662 	void *freelist;
2663 	size_t offset;
2664 	gfp_t local_flags;
2665 	int page_node;
2666 	struct kmem_cache_node *n;
2667 	struct page *page;
2668 
2669 	/*
2670 	 * Be lazy and only check for valid flags here, keeping it out of the
2671 	 * critical path in kmem_cache_alloc().
2672 	 */
2673 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2674 		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2675 		flags &= ~GFP_SLAB_BUG_MASK;
2676 		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2677 				invalid_mask, &invalid_mask, flags, &flags);
2678 		dump_stack();
2679 	}
2680 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2681 
2682 	check_irq_off();
2683 	if (gfpflags_allow_blocking(local_flags))
2684 		local_irq_enable();
2685 
2686 	/*
2687 	 * Get mem for the objs.  Attempt to allocate a physical page from
2688 	 * 'nodeid'.
2689 	 */
2690 	page = kmem_getpages(cachep, local_flags, nodeid);
2691 	if (!page)
2692 		goto failed;
2693 
2694 	page_node = page_to_nid(page);
2695 	n = get_node(cachep, page_node);
2696 
2697 	/* Get the colour for the slab, and calculate the next value. */
2698 	n->colour_next++;
2699 	if (n->colour_next >= cachep->colour)
2700 		n->colour_next = 0;
2701 
2702 	offset = n->colour_next;
2703 	if (offset >= cachep->colour)
2704 		offset = 0;
2705 
2706 	offset *= cachep->colour_off;
2707 
2708 	/* Get slab management. */
2709 	freelist = alloc_slabmgmt(cachep, page, offset,
2710 			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2711 	if (OFF_SLAB(cachep) && !freelist)
2712 		goto opps1;
2713 
2714 	slab_map_pages(cachep, page, freelist);
2715 
2716 	kasan_poison_slab(page);
2717 	cache_init_objs(cachep, page);
2718 
2719 	if (gfpflags_allow_blocking(local_flags))
2720 		local_irq_disable();
2721 
2722 	return page;
2723 
2724 opps1:
2725 	kmem_freepages(cachep, page);
2726 failed:
2727 	if (gfpflags_allow_blocking(local_flags))
2728 		local_irq_disable();
2729 	return NULL;
2730 }
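
/*
 * Colouring sketch (illustrative): with cachep->colour == 4 and
 * colour_off == 64 (one cache line), successive slabs cycle through
 * start offsets 64, 128, 192, 0, ... so that equal-index objects in
 * different slabs land in different cache lines.
 */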
2731 
2732 static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2733 {
2734 	struct kmem_cache_node *n;
2735 	void *list = NULL;
2736 
2737 	check_irq_off();
2738 
2739 	if (!page)
2740 		return;
2741 
2742 	INIT_LIST_HEAD(&page->lru);
2743 	n = get_node(cachep, page_to_nid(page));
2744 
2745 	spin_lock(&n->list_lock);
2746 	n->total_slabs++;
2747 	if (!page->active) {
2748 		list_add_tail(&page->lru, &(n->slabs_free));
2749 		n->free_slabs++;
2750 	} else
2751 		fixup_slab_list(cachep, n, page, &list);
2752 
2753 	STATS_INC_GROWN(cachep);
2754 	n->free_objects += cachep->num - page->active;
2755 	spin_unlock(&n->list_lock);
2756 
2757 	fixup_objfreelist_debug(cachep, &list);
2758 }
2759 
2760 #if DEBUG
2761 
2762 /*
2763  * Perform extra freeing checks:
2764  * - detect bad pointers.
2765  * - POISON/RED_ZONE checking
2766  */
2767 static void kfree_debugcheck(const void *objp)
2768 {
2769 	if (!virt_addr_valid(objp)) {
2770 		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2771 		       (unsigned long)objp);
2772 		BUG();
2773 	}
2774 }
2775 
2776 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2777 {
2778 	unsigned long long redzone1, redzone2;
2779 
2780 	redzone1 = *dbg_redzone1(cache, obj);
2781 	redzone2 = *dbg_redzone2(cache, obj);
2782 
2783 	/*
2784 	 * Redzone is ok.
2785 	 */
2786 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2787 		return;
2788 
2789 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2790 		slab_error(cache, "double free detected");
2791 	else
2792 		slab_error(cache, "memory outside object was overwritten");
2793 
2794 	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2795 	       obj, redzone1, redzone2);
2796 }
2797 
2798 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2799 				   unsigned long caller)
2800 {
2801 	unsigned int objnr;
2802 	struct page *page;
2803 
2804 	BUG_ON(virt_to_cache(objp) != cachep);
2805 
2806 	objp -= obj_offset(cachep);
2807 	kfree_debugcheck(objp);
2808 	page = virt_to_head_page(objp);
2809 
2810 	if (cachep->flags & SLAB_RED_ZONE) {
2811 		verify_redzone_free(cachep, objp);
2812 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2813 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2814 	}
2815 	if (cachep->flags & SLAB_STORE_USER) {
2816 		set_store_user_dirty(cachep);
2817 		*dbg_userword(cachep, objp) = (void *)caller;
2818 	}
2819 
2820 	objnr = obj_to_index(cachep, page, objp);
2821 
2822 	BUG_ON(objnr >= cachep->num);
2823 	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2824 
2825 	if (cachep->flags & SLAB_POISON) {
2826 		poison_obj(cachep, objp, POISON_FREE);
2827 		slab_kernel_map(cachep, objp, 0, caller);
2828 	}
2829 	return objp;
2830 }
2831 
2832 #else
2833 #define kfree_debugcheck(x) do { } while(0)
2834 #define cache_free_debugcheck(x,objp,z) (objp)
2835 #endif
2836 
2837 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2838 						void **list)
2839 {
2840 #if DEBUG
2841 	void *next = *list;
2842 	void *objp;
2843 
2844 	while (next) {
2845 		objp = next - obj_offset(cachep);
2846 		next = *(void **)next;
2847 		poison_obj(cachep, objp, POISON_FREE);
2848 	}
2849 #endif
2850 }
2851 
2852 static inline void fixup_slab_list(struct kmem_cache *cachep,
2853 				struct kmem_cache_node *n, struct page *page,
2854 				void **list)
2855 {
2856 	/* move slabp to correct slabp list: */
2857 	list_del(&page->lru);
2858 	if (page->active == cachep->num) {
2859 		list_add(&page->lru, &n->slabs_full);
2860 		if (OBJFREELIST_SLAB(cachep)) {
2861 #if DEBUG
2862 			/* Poisoning will be done without holding the lock */
2863 			if (cachep->flags & SLAB_POISON) {
2864 				void **objp = page->freelist;
2865 
2866 				*objp = *list;
2867 				*list = objp;
2868 			}
2869 #endif
2870 			page->freelist = NULL;
2871 		}
2872 	} else
2873 		list_add(&page->lru, &n->slabs_partial);
2874 }
2875 
2876 /* Try to find non-pfmemalloc slab if needed */
2877 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2878 					struct page *page, bool pfmemalloc)
2879 {
2880 	if (!page)
2881 		return NULL;
2882 
2883 	if (pfmemalloc)
2884 		return page;
2885 
2886 	if (!PageSlabPfmemalloc(page))
2887 		return page;
2888 
2889 	/* No need to keep pfmemalloc slab if we have enough free objects */
2890 	if (n->free_objects > n->free_limit) {
2891 		ClearPageSlabPfmemalloc(page);
2892 		return page;
2893 	}
2894 
2895 	/* Move pfmemalloc slab to the end of list to speed up next search */
2896 	list_del(&page->lru);
2897 	if (!page->active) {
2898 		list_add_tail(&page->lru, &n->slabs_free);
2899 		n->free_slabs++;
2900 	} else
2901 		list_add_tail(&page->lru, &n->slabs_partial);
2902 
2903 	list_for_each_entry(page, &n->slabs_partial, lru) {
2904 		if (!PageSlabPfmemalloc(page))
2905 			return page;
2906 	}
2907 
2908 	n->free_touched = 1;
2909 	list_for_each_entry(page, &n->slabs_free, lru) {
2910 		if (!PageSlabPfmemalloc(page)) {
2911 			n->free_slabs--;
2912 			return page;
2913 		}
2914 	}
2915 
2916 	return NULL;
2917 }
2918 
2919 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2920 {
2921 	struct page *page;
2922 
2923 	assert_spin_locked(&n->list_lock);
2924 	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2925 	if (!page) {
2926 		n->free_touched = 1;
2927 		page = list_first_entry_or_null(&n->slabs_free, struct page,
2928 						lru);
2929 		if (page)
2930 			n->free_slabs--;
2931 	}
2932 
2933 	if (sk_memalloc_socks())
2934 		page = get_valid_first_slab(n, page, pfmemalloc);
2935 
2936 	return page;
2937 }
2938 
2939 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2940 				struct kmem_cache_node *n, gfp_t flags)
2941 {
2942 	struct page *page;
2943 	void *obj;
2944 	void *list = NULL;
2945 
2946 	if (!gfp_pfmemalloc_allowed(flags))
2947 		return NULL;
2948 
2949 	spin_lock(&n->list_lock);
2950 	page = get_first_slab(n, true);
2951 	if (!page) {
2952 		spin_unlock(&n->list_lock);
2953 		return NULL;
2954 	}
2955 
2956 	obj = slab_get_obj(cachep, page);
2957 	n->free_objects--;
2958 
2959 	fixup_slab_list(cachep, n, page, &list);
2960 
2961 	spin_unlock(&n->list_lock);
2962 	fixup_objfreelist_debug(cachep, &list);
2963 
2964 	return obj;
2965 }
2966 
2967 /*
2968  * The slab list should be fixed up by fixup_slab_list() for an existing
2969  * slab, or by cache_grow_end() for a new slab.
2970  */
2971 static __always_inline int alloc_block(struct kmem_cache *cachep,
2972 		struct array_cache *ac, struct page *page, int batchcount)
2973 {
2974 	/*
2975 	 * There must be at least one object available for
2976 	 * allocation.
2977 	 */
2978 	BUG_ON(page->active >= cachep->num);
2979 
2980 	while (page->active < cachep->num && batchcount--) {
2981 		STATS_INC_ALLOCED(cachep);
2982 		STATS_INC_ACTIVE(cachep);
2983 		STATS_SET_HIGH(cachep);
2984 
2985 		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2986 	}
2987 
2988 	return batchcount;
2989 }
2990 
2991 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2992 {
2993 	int batchcount;
2994 	struct kmem_cache_node *n;
2995 	struct array_cache *ac, *shared;
2996 	int node;
2997 	void *list = NULL;
2998 	struct page *page;
2999 
3000 	check_irq_off();
3001 	node = numa_mem_id();
3002 
3003 	ac = cpu_cache_get(cachep);
3004 	batchcount = ac->batchcount;
3005 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3006 		/*
3007 		 * If there was little recent activity on this cache, then
3008 		 * perform only a partial refill.  Otherwise we could generate
3009 		 * refill bouncing.
3010 		 */
3011 		batchcount = BATCHREFILL_LIMIT;
3012 	}
3013 	n = get_node(cachep, node);
3014 
3015 	BUG_ON(ac->avail > 0 || !n);
3016 	shared = READ_ONCE(n->shared);
3017 	if (!n->free_objects && (!shared || !shared->avail))
3018 		goto direct_grow;
3019 
3020 	spin_lock(&n->list_lock);
3021 	shared = READ_ONCE(n->shared);
3022 
3023 	/* See if we can refill from the shared array */
3024 	if (shared && transfer_objects(ac, shared, batchcount)) {
3025 		shared->touched = 1;
3026 		goto alloc_done;
3027 	}
3028 
3029 	while (batchcount > 0) {
3030 		/* Get slab alloc is to come from. */
3031 		page = get_first_slab(n, false);
3032 		if (!page)
3033 			goto must_grow;
3034 
3035 		check_spinlock_acquired(cachep);
3036 
3037 		batchcount = alloc_block(cachep, ac, page, batchcount);
3038 		fixup_slab_list(cachep, n, page, &list);
3039 	}
3040 
3041 must_grow:
3042 	n->free_objects -= ac->avail;
3043 alloc_done:
3044 	spin_unlock(&n->list_lock);
3045 	fixup_objfreelist_debug(cachep, &list);
3046 
3047 direct_grow:
3048 	if (unlikely(!ac->avail)) {
3049 		/* Check if we can use obj in pfmemalloc slab */
3050 		if (sk_memalloc_socks()) {
3051 			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3052 
3053 			if (obj)
3054 				return obj;
3055 		}
3056 
3057 		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3058 
3059 		/*
3060 		 * cache_grow_begin() can re-enable interrupts,
3061 		 * so ac could change.
3062 		 */
3063 		ac = cpu_cache_get(cachep);
3064 		if (!ac->avail && page)
3065 			alloc_block(cachep, ac, page, batchcount);
3066 		cache_grow_end(cachep, page);
3067 
3068 		if (!ac->avail)
3069 			return NULL;
3070 	}
3071 	ac->touched = 1;
3072 
3073 	return ac->entry[--ac->avail];
3074 }
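
/*
 * Refill sketch (illustrative): on a cache that has seen little recent
 * activity (ac->touched clear) with batchcount == 60, the clamp above
 * limits the refill to BATCHREFILL_LIMIT objects, avoiding a large
 * transfer that would only bounce back on the next drain; a busy cache
 * refills the full batch.
 */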
3075 
3076 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3077 						gfp_t flags)
3078 {
3079 	might_sleep_if(gfpflags_allow_blocking(flags));
3080 }
3081 
3082 #if DEBUG
3083 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3084 				gfp_t flags, void *objp, unsigned long caller)
3085 {
3086 	if (!objp)
3087 		return objp;
3088 	if (cachep->flags & SLAB_POISON) {
3089 		check_poison_obj(cachep, objp);
3090 		slab_kernel_map(cachep, objp, 1, 0);
3091 		poison_obj(cachep, objp, POISON_INUSE);
3092 	}
3093 	if (cachep->flags & SLAB_STORE_USER)
3094 		*dbg_userword(cachep, objp) = (void *)caller;
3095 
3096 	if (cachep->flags & SLAB_RED_ZONE) {
3097 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3098 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3099 			slab_error(cachep, "double free, or memory outside object was overwritten");
3100 			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3101 			       objp, *dbg_redzone1(cachep, objp),
3102 			       *dbg_redzone2(cachep, objp));
3103 		}
3104 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3105 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3106 	}
3107 
3108 	objp += obj_offset(cachep);
3109 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3110 		cachep->ctor(objp);
3111 	if (ARCH_SLAB_MINALIGN &&
3112 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3113 		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3114 		       objp, (int)ARCH_SLAB_MINALIGN);
3115 	}
3116 	return objp;
3117 }
3118 #else
3119 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3120 #endif
3121 
3122 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3123 {
3124 	void *objp;
3125 	struct array_cache *ac;
3126 
3127 	check_irq_off();
3128 
3129 	ac = cpu_cache_get(cachep);
3130 	if (likely(ac->avail)) {
3131 		ac->touched = 1;
3132 		objp = ac->entry[--ac->avail];
3133 
3134 		STATS_INC_ALLOCHIT(cachep);
3135 		goto out;
3136 	}
3137 
3138 	STATS_INC_ALLOCMISS(cachep);
3139 	objp = cache_alloc_refill(cachep, flags);
3140 	/*
3141 	 * the 'ac' may be updated by cache_alloc_refill(),
3142 	 * and kmemleak_erase() requires its correct value.
3143 	 */
3144 	ac = cpu_cache_get(cachep);
3145 
3146 out:
3147 	/*
3148 	 * To avoid a false negative, if an object that is in one of the
3149 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3150 	 * treat the array pointers as a reference to the object.
3151 	 */
3152 	if (objp)
3153 		kmemleak_erase(&ac->entry[ac->avail]);
3154 	return objp;
3155 }
3156 
3157 #ifdef CONFIG_NUMA
3158 /*
3159  * Try allocating on another node if PFA_SPREAD_SLAB is set or a mempolicy is in effect.
3160  *
3161  * If we are in_interrupt, then process context, including cpusets and
3162  * mempolicy, may not apply and should not be used for allocation policy.
3163  */
3164 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3165 {
3166 	int nid_alloc, nid_here;
3167 
3168 	if (in_interrupt() || (flags & __GFP_THISNODE))
3169 		return NULL;
3170 	nid_alloc = nid_here = numa_mem_id();
3171 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3172 		nid_alloc = cpuset_slab_spread_node();
3173 	else if (current->mempolicy)
3174 		nid_alloc = mempolicy_slab_node();
3175 	if (nid_alloc != nid_here)
3176 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3177 	return NULL;
3178 }
3179 
3180 /*
3181  * Fallback function if there was no memory available and no objects on a
3182  * certain node and falling back is permitted. First we scan all the
3183  * available nodes for available objects. If that fails then we
3184  * perform an allocation without specifying a node. This allows the page
3185  * allocator to do its reclaim / fallback magic. We then insert the
3186  * slab into the proper nodelist and then allocate from it.
3187  */
3188 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3189 {
3190 	struct zonelist *zonelist;
3191 	struct zoneref *z;
3192 	struct zone *zone;
3193 	enum zone_type high_zoneidx = gfp_zone(flags);
3194 	void *obj = NULL;
3195 	struct page *page;
3196 	int nid;
3197 	unsigned int cpuset_mems_cookie;
3198 
3199 	if (flags & __GFP_THISNODE)
3200 		return NULL;
3201 
3202 retry_cpuset:
3203 	cpuset_mems_cookie = read_mems_allowed_begin();
3204 	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3205 
3206 retry:
3207 	/*
3208 	 * Look through allowed nodes for objects available
3209 	 * from existing per node queues.
3210 	 */
3211 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3212 		nid = zone_to_nid(zone);
3213 
3214 		if (cpuset_zone_allowed(zone, flags) &&
3215 			get_node(cache, nid) &&
3216 			get_node(cache, nid)->free_objects) {
3217 				obj = ____cache_alloc_node(cache,
3218 					gfp_exact_node(flags), nid);
3219 				if (obj)
3220 					break;
3221 		}
3222 	}
3223 
3224 	if (!obj) {
3225 		/*
3226 		 * This allocation will be performed within the constraints
3227 		 * of the current cpuset / memory policy requirements.
3228 		 * We may trigger various forms of reclaim on the allowed
3229 		 * set and go into memory reserves if necessary.
3230 		 */
3231 		page = cache_grow_begin(cache, flags, numa_mem_id());
3232 		cache_grow_end(cache, page);
3233 		if (page) {
3234 			nid = page_to_nid(page);
3235 			obj = ____cache_alloc_node(cache,
3236 				gfp_exact_node(flags), nid);
3237 
3238 			/*
3239 			 * Another processor may allocate the objects in
3240 			 * the slab since we are not holding any locks.
3241 			 */
3242 			if (!obj)
3243 				goto retry;
3244 		}
3245 	}
3246 
3247 	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3248 		goto retry_cpuset;
3249 	return obj;
3250 }
3251 
3252 /*
3253  * An interface to enable slab creation on nodeid
3254  */
3255 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3256 				int nodeid)
3257 {
3258 	struct page *page;
3259 	struct kmem_cache_node *n;
3260 	void *obj = NULL;
3261 	void *list = NULL;
3262 
3263 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3264 	n = get_node(cachep, nodeid);
3265 	BUG_ON(!n);
3266 
3267 	check_irq_off();
3268 	spin_lock(&n->list_lock);
3269 	page = get_first_slab(n, false);
3270 	if (!page)
3271 		goto must_grow;
3272 
3273 	check_spinlock_acquired_node(cachep, nodeid);
3274 
3275 	STATS_INC_NODEALLOCS(cachep);
3276 	STATS_INC_ACTIVE(cachep);
3277 	STATS_SET_HIGH(cachep);
3278 
3279 	BUG_ON(page->active == cachep->num);
3280 
3281 	obj = slab_get_obj(cachep, page);
3282 	n->free_objects--;
3283 
3284 	fixup_slab_list(cachep, n, page, &list);
3285 
3286 	spin_unlock(&n->list_lock);
3287 	fixup_objfreelist_debug(cachep, &list);
3288 	return obj;
3289 
3290 must_grow:
3291 	spin_unlock(&n->list_lock);
3292 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3293 	if (page) {
3294 		/* This slab isn't counted yet so don't update free_objects */
3295 		obj = slab_get_obj(cachep, page);
3296 	}
3297 	cache_grow_end(cachep, page);
3298 
3299 	return obj ? obj : fallback_alloc(cachep, flags);
3300 }
3301 
3302 static __always_inline void *
3303 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3304 		   unsigned long caller)
3305 {
3306 	unsigned long save_flags;
3307 	void *ptr;
3308 	int slab_node = numa_mem_id();
3309 
3310 	flags &= gfp_allowed_mask;
3311 	cachep = slab_pre_alloc_hook(cachep, flags);
3312 	if (unlikely(!cachep))
3313 		return NULL;
3314 
3315 	cache_alloc_debugcheck_before(cachep, flags);
3316 	local_irq_save(save_flags);
3317 
3318 	if (nodeid == NUMA_NO_NODE)
3319 		nodeid = slab_node;
3320 
3321 	if (unlikely(!get_node(cachep, nodeid))) {
3322 		/* Node not bootstrapped yet */
3323 		ptr = fallback_alloc(cachep, flags);
3324 		goto out;
3325 	}
3326 
3327 	if (nodeid == slab_node) {
3328 		/*
3329 		 * Use the locally cached objects if possible.
3330 		 * However ____cache_alloc does not allow fallback
3331 		 * to other nodes. It may fail while we still have
3332 		 * objects on other nodes available.
3333 		 */
3334 		ptr = ____cache_alloc(cachep, flags);
3335 		if (ptr)
3336 			goto out;
3337 	}
3338 	/* ____cache_alloc_node can fall back to other nodes */
3339 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3340   out:
3341 	local_irq_restore(save_flags);
3342 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3343 
3344 	if (unlikely(flags & __GFP_ZERO) && ptr)
3345 		memset(ptr, 0, cachep->object_size);
3346 
3347 	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3348 	return ptr;
3349 }
3350 
3351 static __always_inline void *
3352 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3353 {
3354 	void *objp;
3355 
3356 	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3357 		objp = alternate_node_alloc(cache, flags);
3358 		if (objp)
3359 			goto out;
3360 	}
3361 	objp = ____cache_alloc(cache, flags);
3362 
3363 	/*
3364 	 * We may just have run out of memory on the local node.
3365 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3366 	 */
3367 	if (!objp)
3368 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3369 
3370   out:
3371 	return objp;
3372 }
3373 #else
3374 
3375 static __always_inline void *
3376 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3377 {
3378 	return ____cache_alloc(cachep, flags);
3379 }
3380 
3381 #endif /* CONFIG_NUMA */
3382 
3383 static __always_inline void *
3384 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3385 {
3386 	unsigned long save_flags;
3387 	void *objp;
3388 
3389 	flags &= gfp_allowed_mask;
3390 	cachep = slab_pre_alloc_hook(cachep, flags);
3391 	if (unlikely(!cachep))
3392 		return NULL;
3393 
3394 	cache_alloc_debugcheck_before(cachep, flags);
3395 	local_irq_save(save_flags);
3396 	objp = __do_cache_alloc(cachep, flags);
3397 	local_irq_restore(save_flags);
3398 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3399 	prefetchw(objp);
3400 
3401 	if (unlikely(flags & __GFP_ZERO) && objp)
3402 		memset(objp, 0, cachep->object_size);
3403 
3404 	slab_post_alloc_hook(cachep, flags, 1, &objp);
3405 	return objp;
3406 }
3407 
3408 /*
3409  * The caller must hold the correct kmem_cache_node's list_lock.
3410  * @list: list of detached free slabs that should be freed by the caller
3411  */
3412 static void free_block(struct kmem_cache *cachep, void **objpp,
3413 			int nr_objects, int node, struct list_head *list)
3414 {
3415 	int i;
3416 	struct kmem_cache_node *n = get_node(cachep, node);
3417 	struct page *page;
3418 
3419 	n->free_objects += nr_objects;
3420 
3421 	for (i = 0; i < nr_objects; i++) {
3422 		void *objp;
3423 		struct page *page;
3424 
3425 		objp = objpp[i];
3426 
3427 		page = virt_to_head_page(objp);
3428 		list_del(&page->lru);
3429 		check_spinlock_acquired_node(cachep, node);
3430 		slab_put_obj(cachep, page, objp);
3431 		STATS_DEC_ACTIVE(cachep);
3432 
3433 		/* fixup slab chains */
3434 		if (page->active == 0) {
3435 			list_add(&page->lru, &n->slabs_free);
3436 			n->free_slabs++;
3437 		} else {
3438 			/* Unconditionally move a slab to the end of the
3439 			 * partial list on free - maximum time for the
3440 			 * other objects to be freed, too.
3441 			 */
3442 			list_add_tail(&page->lru, &n->slabs_partial);
3443 		}
3444 	}
3445 
3446 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3447 		n->free_objects -= cachep->num;
3448 
3449 		page = list_last_entry(&n->slabs_free, struct page, lru);
3450 		list_move(&page->lru, list);
3451 		n->free_slabs--;
3452 		n->total_slabs--;
3453 	}
3454 }
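
/*
 * Trim sketch (illustrative): with num == 32 objects per slab,
 * free_limit == 64 and free_objects == 120, the while loop above
 * detaches one empty slab (free_objects -> 88) and then another (-> 56),
 * stopping once the limit is respected or slabs_free runs empty.
 */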
3455 
3456 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3457 {
3458 	int batchcount;
3459 	struct kmem_cache_node *n;
3460 	int node = numa_mem_id();
3461 	LIST_HEAD(list);
3462 
3463 	batchcount = ac->batchcount;
3464 
3465 	check_irq_off();
3466 	n = get_node(cachep, node);
3467 	spin_lock(&n->list_lock);
3468 	if (n->shared) {
3469 		struct array_cache *shared_array = n->shared;
3470 		int max = shared_array->limit - shared_array->avail;
3471 		if (max) {
3472 			if (batchcount > max)
3473 				batchcount = max;
3474 			memcpy(&(shared_array->entry[shared_array->avail]),
3475 			       ac->entry, sizeof(void *) * batchcount);
3476 			shared_array->avail += batchcount;
3477 			goto free_done;
3478 		}
3479 	}
3480 
3481 	free_block(cachep, ac->entry, batchcount, node, &list);
3482 free_done:
3483 #if STATS
3484 	{
3485 		int i = 0;
3486 		struct page *page;
3487 
3488 		list_for_each_entry(page, &n->slabs_free, lru) {
3489 			BUG_ON(page->active);
3490 
3491 			i++;
3492 		}
3493 		STATS_SET_FREEABLE(cachep, i);
3494 	}
3495 #endif
3496 	spin_unlock(&n->list_lock);
3497 	slabs_destroy(cachep, &list);
3498 	ac->avail -= batchcount;
3499 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3500 }
3501 
3502 /*
3503  * Release an obj back to its cache. If the obj has a constructed state, it must
3504  * be in this state _before_ it is released.  Called with interrupts disabled.
3505  */
3506 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3507 				unsigned long caller)
3508 {
3509 	/* Put the object into the quarantine, don't touch it for now. */
3510 	if (kasan_slab_free(cachep, objp))
3511 		return;
3512 
3513 	___cache_free(cachep, objp, caller);
3514 }
3515 
3516 void ___cache_free(struct kmem_cache *cachep, void *objp,
3517 		unsigned long caller)
3518 {
3519 	struct array_cache *ac = cpu_cache_get(cachep);
3520 
3521 	check_irq_off();
3522 	kmemleak_free_recursive(objp, cachep->flags);
3523 	objp = cache_free_debugcheck(cachep, objp, caller);
3524 
3525 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3526 
3527 	/*
3528 	 * Skip calling cache_free_alien() when the platform is not NUMA.
3529 	 * This will avoid cache misses that happen while accessing slabp (which
3530 	 * is a per-page memory reference) to get nodeid. Instead use a global
3531 	 * variable to skip the call, which is most likely to be present in
3532 	 * the cache.
3533 	 */
3534 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3535 		return;
3536 
3537 	if (ac->avail < ac->limit) {
3538 		STATS_INC_FREEHIT(cachep);
3539 	} else {
3540 		STATS_INC_FREEMISS(cachep);
3541 		cache_flusharray(cachep, ac);
3542 	}
3543 
3544 	if (sk_memalloc_socks()) {
3545 		struct page *page = virt_to_head_page(objp);
3546 
3547 		if (unlikely(PageSlabPfmemalloc(page))) {
3548 			cache_free_pfmemalloc(cachep, page, objp);
3549 			return;
3550 		}
3551 	}
3552 
3553 	ac->entry[ac->avail++] = objp;
3554 }
3555 
3556 /**
3557  * kmem_cache_alloc - Allocate an object
3558  * @cachep: The cache to allocate from.
3559  * @flags: See kmalloc().
3560  *
3561  * Allocate an object from this cache.  The flags are only relevant
3562  * if the cache has no available objects.
3563  */
3564 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3565 {
3566 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3567 
3568 	kasan_slab_alloc(cachep, ret, flags);
3569 	trace_kmem_cache_alloc(_RET_IP_, ret,
3570 			       cachep->object_size, cachep->size, flags);
3571 
3572 	return ret;
3573 }
3574 EXPORT_SYMBOL(kmem_cache_alloc);
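
/*
 * Usage sketch (illustrative; 'foo_cache' is a hypothetical cache set up
 * earlier with kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */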
3575 
3576 static __always_inline void
3577 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3578 				  size_t size, void **p, unsigned long caller)
3579 {
3580 	size_t i;
3581 
3582 	for (i = 0; i < size; i++)
3583 		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3584 }
3585 
3586 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3587 			  void **p)
3588 {
3589 	size_t i;
3590 
3591 	s = slab_pre_alloc_hook(s, flags);
3592 	if (!s)
3593 		return 0;
3594 
3595 	cache_alloc_debugcheck_before(s, flags);
3596 
3597 	local_irq_disable();
3598 	for (i = 0; i < size; i++) {
3599 		void *objp = __do_cache_alloc(s, flags);
3600 
3601 		if (unlikely(!objp))
3602 			goto error;
3603 		p[i] = objp;
3604 	}
3605 	local_irq_enable();
3606 
3607 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3608 
3609 	/* Clear memory outside IRQ disabled section */
3610 	if (unlikely(flags & __GFP_ZERO))
3611 		for (i = 0; i < size; i++)
3612 			memset(p[i], 0, s->object_size);
3613 
3614 	slab_post_alloc_hook(s, flags, size, p);
3615 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3616 	return size;
3617 error:
3618 	local_irq_enable();
3619 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3620 	slab_post_alloc_hook(s, flags, i, p);
3621 	__kmem_cache_free_bulk(s, i, p);
3622 	return 0;
3623 }
3624 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
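
/*
 * Bulk usage sketch (illustrative; 'foo_cache' is hypothetical): pay the
 * irq-disable cost once for several objects:
 *
 *	void *objs[8];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 8, objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, 8, objs);
 */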
3625 
3626 #ifdef CONFIG_TRACING
3627 void *
3628 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3629 {
3630 	void *ret;
3631 
3632 	ret = slab_alloc(cachep, flags, _RET_IP_);
3633 
3634 	kasan_kmalloc(cachep, ret, size, flags);
3635 	trace_kmalloc(_RET_IP_, ret,
3636 		      size, cachep->size, flags);
3637 	return ret;
3638 }
3639 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3640 #endif
3641 
3642 #ifdef CONFIG_NUMA
3643 /**
3644  * kmem_cache_alloc_node - Allocate an object on the specified node
3645  * @cachep: The cache to allocate from.
3646  * @flags: See kmalloc().
3647  * @nodeid: node number of the target node.
3648  *
3649  * Identical to kmem_cache_alloc but it will allocate memory on the given
3650  * node, which can improve the performance for cpu bound structures.
3651  *
3652  * Fallback to other node is possible if __GFP_THISNODE is not set.
3653  */
3654 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3655 {
3656 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3657 
3658 	kasan_slab_alloc(cachep, ret, flags);
3659 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3660 				    cachep->object_size, cachep->size,
3661 				    flags, nodeid);
3662 
3663 	return ret;
3664 }
3665 EXPORT_SYMBOL(kmem_cache_alloc_node);
3666 
3667 #ifdef CONFIG_TRACING
3668 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3669 				  gfp_t flags,
3670 				  int nodeid,
3671 				  size_t size)
3672 {
3673 	void *ret;
3674 
3675 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3676 
3677 	kasan_kmalloc(cachep, ret, size, flags);
3678 	trace_kmalloc_node(_RET_IP_, ret,
3679 			   size, cachep->size,
3680 			   flags, nodeid);
3681 	return ret;
3682 }
3683 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3684 #endif
3685 
3686 static __always_inline void *
3687 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3688 {
3689 	struct kmem_cache *cachep;
3690 	void *ret;
3691 
3692 	cachep = kmalloc_slab(size, flags);
3693 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3694 		return cachep;
3695 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3696 	kasan_kmalloc(cachep, ret, size, flags);
3697 
3698 	return ret;
3699 }
3700 
3701 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3702 {
3703 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3704 }
3705 EXPORT_SYMBOL(__kmalloc_node);
3706 
3707 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3708 		int node, unsigned long caller)
3709 {
3710 	return __do_kmalloc_node(size, flags, node, caller);
3711 }
3712 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3713 #endif /* CONFIG_NUMA */
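
/*
 * Node-local sketch (illustrative; 'table' is hypothetical): on NUMA
 * builds, kmalloc_node() resolves to __do_kmalloc_node() above, e.g.
 *
 *	table[nid] = kmalloc_node(sizeof(*table[nid]), GFP_KERNEL, nid);
 */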
3714 
3715 /**
3716  * __do_kmalloc - allocate memory
3717  * @size: how many bytes of memory are required.
3718  * @flags: the type of memory to allocate (see kmalloc).
3719  * @caller: function caller for debug tracking of the caller
3720  */
3721 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3722 					  unsigned long caller)
3723 {
3724 	struct kmem_cache *cachep;
3725 	void *ret;
3726 
3727 	cachep = kmalloc_slab(size, flags);
3728 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3729 		return cachep;
3730 	ret = slab_alloc(cachep, flags, caller);
3731 
3732 	kasan_kmalloc(cachep, ret, size, flags);
3733 	trace_kmalloc(caller, ret,
3734 		      size, cachep->size, flags);
3735 
3736 	return ret;
3737 }
3738 
3739 void *__kmalloc(size_t size, gfp_t flags)
3740 {
3741 	return __do_kmalloc(size, flags, _RET_IP_);
3742 }
3743 EXPORT_SYMBOL(__kmalloc);
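
/*
 * Usage sketch (illustrative): for non-constant sizes, kmalloc() in
 * <linux/slab.h> resolves to __kmalloc() above:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */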
3744 
3745 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3746 {
3747 	return __do_kmalloc(size, flags, caller);
3748 }
3749 EXPORT_SYMBOL(__kmalloc_track_caller);
3750 
3751 /**
3752  * kmem_cache_free - Deallocate an object
3753  * @cachep: The cache the allocation was from.
3754  * @objp: The previously allocated object.
3755  *
3756  * Free an object which was previously allocated from this
3757  * cache.
3758  */
3759 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3760 {
3761 	unsigned long flags;
3762 	cachep = cache_from_obj(cachep, objp);
3763 	if (!cachep)
3764 		return;
3765 
3766 	local_irq_save(flags);
3767 	debug_check_no_locks_freed(objp, cachep->object_size);
3768 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3769 		debug_check_no_obj_freed(objp, cachep->object_size);
3770 	__cache_free(cachep, objp, _RET_IP_);
3771 	local_irq_restore(flags);
3772 
3773 	trace_kmem_cache_free(_RET_IP_, objp);
3774 }
3775 EXPORT_SYMBOL(kmem_cache_free);
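
/*
 * Illustrative pairing with a hypothetical 'foo_cache': an object must
 * always be returned to the cache it was allocated from.
 *
 *	struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 */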
3776 
3777 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3778 {
3779 	struct kmem_cache *s;
3780 	size_t i;
3781 
3782 	local_irq_disable();
3783 	for (i = 0; i < size; i++) {
3784 		void *objp = p[i];
3785 
3786 		if (!orig_s) /* called via kfree_bulk */
3787 			s = virt_to_cache(objp);
3788 		else
3789 			s = cache_from_obj(orig_s, objp);
3790 
3791 		debug_check_no_locks_freed(objp, s->object_size);
3792 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3793 			debug_check_no_obj_freed(objp, s->object_size);
3794 
3795 		__cache_free(s, objp, _RET_IP_);
3796 	}
3797 	local_irq_enable();
3798 
3799 	/* FIXME: add tracing */
3800 }
3801 EXPORT_SYMBOL(kmem_cache_free_bulk);
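
/*
 * Illustrative sketch (hypothetical 'foo_cache'): the usual counterpart
 * is kmem_cache_alloc_bulk(), which reports how many objects it filled in.
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs);
 *	...
 *	kmem_cache_free_bulk(foo_cache, got, objs);
 */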
3802 
3803 /**
3804  * kfree - free previously allocated memory
3805  * @objp: pointer returned by kmalloc.
3806  *
3807  * If @objp is NULL, no operation is performed.
3808  *
3809  * Don't free memory not originally allocated by kmalloc()
3810  * or you will run into trouble.
3811  */
3812 void kfree(const void *objp)
3813 {
3814 	struct kmem_cache *c;
3815 	unsigned long flags;
3816 
3817 	trace_kfree(_RET_IP_, objp);
3818 
3819 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3820 		return;
3821 	local_irq_save(flags);
3822 	kfree_debugcheck(objp);
3823 	c = virt_to_cache(objp);
3824 	debug_check_no_locks_freed(objp, c->object_size);
3825 
3826 	debug_check_no_obj_freed(objp, c->object_size);
3827 	__cache_free(c, (void *)objp, _RET_IP_);
3828 	local_irq_restore(flags);
3829 }
3830 EXPORT_SYMBOL(kfree);
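
/*
 * Because ZERO_OR_NULL_PTR() pointers are ignored above, error paths may
 * call kfree() unconditionally, even if the earlier kmalloc() failed.
 * Illustrative sketch ('buf' and 'setup_buf()' are hypothetical):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (setup_buf(buf) < 0) {
 *		kfree(buf);
 *		return -EIO;
 *	}
 */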
3831 
3832 /*
3833  * This initializes kmem_cache_node or resizes various caches for all nodes.
3834  */
3835 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3836 {
3837 	int ret;
3838 	int node;
3839 	struct kmem_cache_node *n;
3840 
3841 	for_each_online_node(node) {
3842 		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3843 		if (ret)
3844 			goto fail;
3845 
3846 	}
3847 
3848 	return 0;
3849 
3850 fail:
3851 	if (!cachep->list.next) {
3852 		/* Cache is not active yet. Roll back what we did */
3853 		node--;
3854 		while (node >= 0) {
3855 			n = get_node(cachep, node);
3856 			if (n) {
3857 				kfree(n->shared);
3858 				free_alien_cache(n->alien);
3859 				kfree(n);
3860 				cachep->node[node] = NULL;
3861 			}
3862 			node--;
3863 		}
3864 	}
3865 	return -ENOMEM;
3866 }
3867 
3868 /* Always called with the slab_mutex held */
3869 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3870 				int batchcount, int shared, gfp_t gfp)
3871 {
3872 	struct array_cache __percpu *cpu_cache, *prev;
3873 	int cpu;
3874 
3875 	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3876 	if (!cpu_cache)
3877 		return -ENOMEM;
3878 
3879 	prev = cachep->cpu_cache;
3880 	cachep->cpu_cache = cpu_cache;
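	/*
	 * The pointer swap above must be visible on every cpu before the
	 * old arrays are drained and freed below.  Slab fastpaths run with
	 * local interrupts disabled, so once each cpu has taken the sync
	 * IPI, none of them can still be referencing 'prev'.
	 */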
3881 	kick_all_cpus_sync();
3882 
3883 	check_irq_on();
3884 	cachep->batchcount = batchcount;
3885 	cachep->limit = limit;
3886 	cachep->shared = shared;
3887 
3888 	if (!prev)
3889 		goto setup_node;
3890 
3891 	for_each_online_cpu(cpu) {
3892 		LIST_HEAD(list);
3893 		int node;
3894 		struct kmem_cache_node *n;
3895 		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3896 
3897 		node = cpu_to_mem(cpu);
3898 		n = get_node(cachep, node);
3899 		spin_lock_irq(&n->list_lock);
3900 		free_block(cachep, ac->entry, ac->avail, node, &list);
3901 		spin_unlock_irq(&n->list_lock);
3902 		slabs_destroy(cachep, &list);
3903 	}
3904 	free_percpu(prev);
3905 
3906 setup_node:
3907 	return setup_kmem_cache_nodes(cachep, gfp);
3908 }
3909 
3910 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3911 				int batchcount, int shared, gfp_t gfp)
3912 {
3913 	int ret;
3914 	struct kmem_cache *c;
3915 
3916 	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3917 
3918 	if (slab_state < FULL)
3919 		return ret;
3920 
3921 	if ((ret < 0) || !is_root_cache(cachep))
3922 		return ret;
3923 
3924 	lockdep_assert_held(&slab_mutex);
3925 	for_each_memcg_cache(c, cachep) {
3926 		/* return value determined by the root cache only */
3927 		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3928 	}
3929 
3930 	return ret;
3931 }
3932 
3933 /* Always called with the slab_mutex held */
3934 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3935 {
3936 	int err;
3937 	int limit = 0;
3938 	int shared = 0;
3939 	int batchcount = 0;
3940 
3941 	err = cache_random_seq_create(cachep, cachep->num, gfp);
3942 	if (err)
3943 		goto end;
3944 
3945 	if (!is_root_cache(cachep)) {
3946 		struct kmem_cache *root = memcg_root_cache(cachep);
3947 		limit = root->limit;
3948 		shared = root->shared;
3949 		batchcount = root->batchcount;
3950 	}
3951 
3952 	if (limit && shared && batchcount)
3953 		goto skip_setup;
3954 	/*
3955 	 * The head array serves three purposes:
3956 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3957 	 * - reduce the number of spinlock operations.
3958 	 * - reduce the number of linked list operations on the slab and
3959 	 *   bufctl chains: array operations are cheaper.
3960 	 * The numbers are guessed; we should auto-tune as described by
3961 	 * Bonwick.
3962 	 */
3963 	if (cachep->size > 131072)
3964 		limit = 1;
3965 	else if (cachep->size > PAGE_SIZE)
3966 		limit = 8;
3967 	else if (cachep->size > 1024)
3968 		limit = 24;
3969 	else if (cachep->size > 256)
3970 		limit = 54;
3971 	else
3972 		limit = 120;
3973 
3974 	/*
3975 	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
3976 	 * allocation behaviour: most allocs on one cpu, most frees on
3977 	 * another. Such workloads need efficient object passing between
3978 	 * cpus, and the shared array provides it; the array replaces
3979 	 * Bonwick's magazine layer.
3980 	 * On a uniprocessor the shared array is functionally equivalent
3981 	 * (but less efficient) to a larger limit, so it is disabled by default.
3982 	 */
3983 	shared = 0;
3984 	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3985 		shared = 8;
3986 
3987 #if DEBUG
3988 	/*
3989 	 * With debugging enabled, a large batchcount leads to excessively long
3990 	 * periods with local interrupts disabled. Limit the batchcount.
3991 	 */
3992 	if (limit > 32)
3993 		limit = 32;
3994 #endif
3995 	batchcount = (limit + 1) / 2;
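	/*
	 * Worked example (illustrative): a cache of 512-byte objects falls
	 * into the "> 256" bucket above, giving limit = 54 and thus
	 * batchcount = (54 + 1) / 2 = 27.
	 */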
3996 skip_setup:
3997 	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3998 end:
3999 	if (err)
4000 		pr_err("enable_cpucache failed for %s, error %d\n",
4001 		       cachep->name, -err);
4002 	return err;
4003 }
4004 
4005 /*
4006  * Drain an array if it contains any elements, taking the node lock only if
4007  * necessary. Note that the node list_lock also protects the array_cache
4008  * when drain_array() is used on the shared array.
4009  */
4010 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4011 			 struct array_cache *ac, int node)
4012 {
4013 	LIST_HEAD(list);
4014 
4015 	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
4016 	check_mutex_acquired();
4017 
4018 	if (!ac || !ac->avail)
4019 		return;
4020 
4021 	if (ac->touched) {
4022 		ac->touched = 0;
4023 		return;
4024 	}
4025 
4026 	spin_lock_irq(&n->list_lock);
4027 	drain_array_locked(cachep, ac, node, false, &list);
4028 	spin_unlock_irq(&n->list_lock);
4029 
4030 	slabs_destroy(cachep, &list);
4031 }
4032 
4033 /**
4034  * cache_reap - Reclaim memory from caches.
4035  * @w: work descriptor
4036  *
4037  * Called from workqueue/eventd every few seconds.
4038  * Purpose:
4039  * - clear the per-cpu caches for this CPU.
4040  * - return freeable pages to the main free memory pool.
4041  *
4042  * If we cannot acquire the cache chain mutex then just give up - we'll try
4043  * again on the next iteration.
4044  */
4045 static void cache_reap(struct work_struct *w)
4046 {
4047 	struct kmem_cache *searchp;
4048 	struct kmem_cache_node *n;
4049 	int node = numa_mem_id();
4050 	struct delayed_work *work = to_delayed_work(w);
4051 
4052 	if (!mutex_trylock(&slab_mutex))
4053 		/* Give up. Set up the next iteration. */
4054 		goto out;
4055 
4056 	list_for_each_entry(searchp, &slab_caches, list) {
4057 		check_irq_on();
4058 
4059 		/*
4060 		 * We only take the node lock if absolutely necessary, and only
4061 		 * once we have established with reasonable certainty that
4062 		 * there is work to do if the lock is obtained.
4063 		 */
4064 		n = get_node(searchp, node);
4065 
4066 		reap_alien(searchp, n);
4067 
4068 		drain_array(searchp, n, cpu_cache_get(searchp), node);
4069 
4070 		/*
4071 		 * These are racy checks but it does not matter
4072 		 * if we skip one check or scan twice.
4073 		 */
4074 		if (time_after(n->next_reap, jiffies))
4075 			goto next;
4076 
4077 		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4078 
4079 		drain_array(searchp, n, n->shared, node);
4080 
4081 		if (n->free_touched)
4082 			n->free_touched = 0;
4083 		else {
4084 			int freed;
4085 
4086 			freed = drain_freelist(searchp, n, (n->free_limit +
4087 				5 * searchp->num - 1) / (5 * searchp->num));
4088 			STATS_ADD_REAPED(searchp, freed);
4089 		}
4090 next:
4091 		cond_resched();
4092 	}
4093 	check_irq_on();
4094 	mutex_unlock(&slab_mutex);
4095 	next_reap_node();
4096 out:
4097 	/* Set up the next iteration */
4098 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4099 }
4100 
4101 #ifdef CONFIG_SLABINFO
4102 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4103 {
4104 	unsigned long active_objs, num_objs, active_slabs;
4105 	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4106 	unsigned long free_slabs = 0;
4107 	int node;
4108 	struct kmem_cache_node *n;
4109 
4110 	for_each_kmem_cache_node(cachep, node, n) {
4111 		check_irq_on();
4112 		spin_lock_irq(&n->list_lock);
4113 
4114 		total_slabs += n->total_slabs;
4115 		free_slabs += n->free_slabs;
4116 		free_objs += n->free_objects;
4117 
4118 		if (n->shared)
4119 			shared_avail += n->shared->avail;
4120 
4121 		spin_unlock_irq(&n->list_lock);
4122 	}
4123 	num_objs = total_slabs * cachep->num;
4124 	active_slabs = total_slabs - free_slabs;
4125 	active_objs = num_objs - free_objs;
4126 
4127 	sinfo->active_objs = active_objs;
4128 	sinfo->num_objs = num_objs;
4129 	sinfo->active_slabs = active_slabs;
4130 	sinfo->num_slabs = total_slabs;
4131 	sinfo->shared_avail = shared_avail;
4132 	sinfo->limit = cachep->limit;
4133 	sinfo->batchcount = cachep->batchcount;
4134 	sinfo->shared = cachep->shared;
4135 	sinfo->objects_per_slab = cachep->num;
4136 	sinfo->cache_order = cachep->gfporder;
4137 }
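
/*
 * Note on the derived numbers above: free_objs counts only objects on
 * slab freelists, so objects cached in the per-cpu and shared arrays are
 * reported as active even though they are available for allocation.
 */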
4138 
4139 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4140 {
4141 #if STATS
4142 	{			/* node stats */
4143 		unsigned long high = cachep->high_mark;
4144 		unsigned long allocs = cachep->num_allocations;
4145 		unsigned long grown = cachep->grown;
4146 		unsigned long reaped = cachep->reaped;
4147 		unsigned long errors = cachep->errors;
4148 		unsigned long max_freeable = cachep->max_freeable;
4149 		unsigned long node_allocs = cachep->node_allocs;
4150 		unsigned long node_frees = cachep->node_frees;
4151 		unsigned long overflows = cachep->node_overflow;
4152 
4153 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4154 			   allocs, high, grown,
4155 			   reaped, errors, max_freeable, node_allocs,
4156 			   node_frees, overflows);
4157 	}
4158 	/* cpu stats */
4159 	{
4160 		unsigned long allochit = atomic_read(&cachep->allochit);
4161 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4162 		unsigned long freehit = atomic_read(&cachep->freehit);
4163 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4164 
4165 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4166 			   allochit, allocmiss, freehit, freemiss);
4167 	}
4168 #endif
4169 }
4170 
4171 #define MAX_SLABINFO_WRITE 128
4172 /**
4173  * slabinfo_write - Tuning for the slab allocator
4174  * @file: unused
4175  * @buffer: user buffer
4176  * @count: data length
4177  * @ppos: unused
4178  */
4179 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4180 		       size_t count, loff_t *ppos)
4181 {
4182 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4183 	int limit, batchcount, shared, res;
4184 	struct kmem_cache *cachep;
4185 
4186 	if (count > MAX_SLABINFO_WRITE)
4187 		return -EINVAL;
4188 	if (copy_from_user(&kbuf, buffer, count))
4189 		return -EFAULT;
4190 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4191 
4192 	tmp = strchr(kbuf, ' ');
4193 	if (!tmp)
4194 		return -EINVAL;
4195 	*tmp = '\0';
4196 	tmp++;
4197 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4198 		return -EINVAL;
4199 
4200 	/* Find the cache in the chain of caches. */
4201 	mutex_lock(&slab_mutex);
4202 	res = -EINVAL;
4203 	list_for_each_entry(cachep, &slab_caches, list) {
4204 		if (!strcmp(cachep->name, kbuf)) {
4205 			if (limit < 1 || batchcount < 1 ||
4206 					batchcount > limit || shared < 0) {
4207 				res = 0;
4208 			} else {
4209 				res = do_tune_cpucache(cachep, limit,
4210 						       batchcount, shared,
4211 						       GFP_KERNEL);
4212 			}
4213 			break;
4214 		}
4215 	}
4216 	mutex_unlock(&slab_mutex);
4217 	if (res >= 0)
4218 		res = count;
4219 	return res;
4220 }
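
/*
 * Illustrative usage from userspace (values are examples only): tuning
 * means writing "<name> <limit> <batchcount> <shared>" to /proc/slabinfo:
 *
 *	echo 'dentry 120 60 8' > /proc/slabinfo
 */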
4221 
4222 #ifdef CONFIG_DEBUG_SLAB_LEAK
4223 
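/*
 * Layout of the caller table handled below, described here for
 * readability: n[0] is the capacity in entries, n[1] the number of
 * entries in use, and n[2..] holds (address, count) pairs kept sorted
 * by address so that add_caller() can binary-search them.
 */
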
4224 static inline int add_caller(unsigned long *n, unsigned long v)
4225 {
4226 	unsigned long *p;
4227 	int l;
4228 	if (!v)
4229 		return 1;
4230 	l = n[1];
4231 	p = n + 2;
4232 	while (l) {
4233 		int i = l/2;
4234 		unsigned long *q = p + 2 * i;
4235 		if (*q == v) {
4236 			q[1]++;
4237 			return 1;
4238 		}
4239 		if (*q > v) {
4240 			l = i;
4241 		} else {
4242 			p = q + 2;
4243 			l -= i + 1;
4244 		}
4245 	}
4246 	if (++n[1] == n[0])
4247 		return 0;
4248 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4249 	p[0] = v;
4250 	p[1] = 1;
4251 	return 1;
4252 }
4253 
4254 static void handle_slab(unsigned long *n, struct kmem_cache *c,
4255 						struct page *page)
4256 {
4257 	void *p;
4258 	int i, j;
4259 	unsigned long v;
4260 
4261 	if (n[0] == n[1])
4262 		return;
4263 	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4264 		bool active = true;
4265 
4266 		for (j = page->active; j < c->num; j++) {
4267 			if (get_free_obj(page, j) == i) {
4268 				active = false;
4269 				break;
4270 			}
4271 		}
4272 
4273 		if (!active)
4274 			continue;
4275 
4276 		/*
4277 		 * probe_kernel_read() is used for DEBUG_PAGEALLOC: the page
4278 		 * table mapping is only established when the object is actually
4279 		 * allocated, so a plain read could mistakenly access an
4280 		 * unmapped object sitting in the cpu cache.
4281 		 */
4282 		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4283 			continue;
4284 
4285 		if (!add_caller(n, v))
4286 			return;
4287 	}
4288 }
4289 
4290 static void show_symbol(struct seq_file *m, unsigned long address)
4291 {
4292 #ifdef CONFIG_KALLSYMS
4293 	unsigned long offset, size;
4294 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4295 
4296 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4297 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4298 		if (modname[0])
4299 			seq_printf(m, " [%s]", modname);
4300 		return;
4301 	}
4302 #endif
4303 	seq_printf(m, "%p", (void *)address);
4304 }
4305 
4306 static int leaks_show(struct seq_file *m, void *p)
4307 {
4308 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4309 	struct page *page;
4310 	struct kmem_cache_node *n;
4311 	const char *name;
4312 	unsigned long *x = m->private;
4313 	int node;
4314 	int i;
4315 
4316 	if (!(cachep->flags & SLAB_STORE_USER))
4317 		return 0;
4318 	if (!(cachep->flags & SLAB_RED_ZONE))
4319 		return 0;
4320 
4321 	/*
4322 	 * Set store_user_clean and start to grab the stored user information
4323 	 * for all objects in this cache. If any alloc/free request comes in
4324 	 * while we are processing, the information would be stale, so restart
4325 	 * the whole scan.
4326 	 */
4327 	do {
4328 		set_store_user_clean(cachep);
4329 		drain_cpu_caches(cachep);
4330 
4331 		x[1] = 0;
4332 
4333 		for_each_kmem_cache_node(cachep, node, n) {
4334 
4335 			check_irq_on();
4336 			spin_lock_irq(&n->list_lock);
4337 
4338 			list_for_each_entry(page, &n->slabs_full, lru)
4339 				handle_slab(x, cachep, page);
4340 			list_for_each_entry(page, &n->slabs_partial, lru)
4341 				handle_slab(x, cachep, page);
4342 			spin_unlock_irq(&n->list_lock);
4343 		}
4344 	} while (!is_store_user_clean(cachep));
4345 
4346 	name = cachep->name;
4347 	if (x[0] == x[1]) {
4348 		/* Increase the buffer size */
4349 		mutex_unlock(&slab_mutex);
4350 		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4351 		if (!m->private) {
4352 			/* Too bad, we are really out */
4353 			m->private = x;
4354 			mutex_lock(&slab_mutex);
4355 			return -ENOMEM;
4356 		}
4357 		*(unsigned long *)m->private = x[0] * 2;
4358 		kfree(x);
4359 		mutex_lock(&slab_mutex);
4360 		/* Now make sure this entry will be retried */
4361 		m->count = m->size;
4362 		return 0;
4363 	}
4364 	for (i = 0; i < x[1]; i++) {
4365 		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4366 		show_symbol(m, x[2*i+2]);
4367 		seq_putc(m, '\n');
4368 	}
4369 
4370 	return 0;
4371 }
4372 
4373 static const struct seq_operations slabstats_op = {
4374 	.start = slab_start,
4375 	.next = slab_next,
4376 	.stop = slab_stop,
4377 	.show = leaks_show,
4378 };
4379 
4380 static int slabstats_open(struct inode *inode, struct file *file)
4381 {
4382 	unsigned long *n;
4383 
4384 	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4385 	if (!n)
4386 		return -ENOMEM;
4387 
4388 	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4389 
4390 	return 0;
4391 }
4392 
4393 static const struct file_operations proc_slabstats_operations = {
4394 	.open		= slabstats_open,
4395 	.read		= seq_read,
4396 	.llseek		= seq_lseek,
4397 	.release	= seq_release_private,
4398 };
4399 #endif
4400 
4401 static int __init slab_proc_init(void)
4402 {
4403 #ifdef CONFIG_DEBUG_SLAB_LEAK
4404 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4405 #endif
4406 	return 0;
4407 }
4408 module_init(slab_proc_init);
4409 #endif
4410 
4411 #ifdef CONFIG_HARDENED_USERCOPY
4412 /*
4413  * Rejects objects that are incorrectly sized.
4414  *
4415  * Returns NULL if the check passes, otherwise the cache's name (as a
4416  * const char *) to indicate an error.
4417  */
4418 const char *__check_heap_object(const void *ptr, unsigned long n,
4419 				struct page *page)
4420 {
4421 	struct kmem_cache *cachep;
4422 	unsigned int objnr;
4423 	unsigned long offset;
4424 
4425 	/* Find and validate object. */
4426 	cachep = page->slab_cache;
4427 	objnr = obj_to_index(cachep, page, (void *)ptr);
4428 	BUG_ON(objnr >= cachep->num);
4429 
4430 	/* Find offset within object. */
4431 	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4432 
4433 	/* Allow address range falling entirely within object size. */
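	/*
	 * The test is written as two comparisons rather than
	 * "offset + n <= object_size" so that a huge 'n' cannot wrap the
	 * addition around.
	 */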
4434 	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
4435 		return NULL;
4436 
4437 	return cachep->name;
4438 }
4439 #endif /* CONFIG_HARDENED_USERCOPY */
4440 
4441 /**
4442  * ksize - get the actual amount of memory allocated for a given object
4443  * @objp: Pointer to the object
4444  *
4445  * kmalloc may internally round up allocations and return more memory
4446  * than requested. ksize() can be used to determine the actual amount of
4447  * memory allocated. The caller may use this additional memory, even though
4448  * a smaller amount of memory was initially specified with the kmalloc call.
4449  * The caller must guarantee that objp points to a valid object previously
4450  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4451  * must not be freed during the duration of the call.
4452  */
4453 size_t ksize(const void *objp)
4454 {
4455 	size_t size;
4456 
4457 	BUG_ON(!objp);
4458 	if (unlikely(objp == ZERO_SIZE_PTR))
4459 		return 0;
4460 
4461 	size = virt_to_cache(objp)->object_size;
4462 	/* We assume that ksize callers could use the whole allocated area,
4463 	 * so we need to unpoison this area.
4464 	 */
4465 	kasan_unpoison_shadow(objp, size);
4466 
4467 	return size;
4468 }
4469 EXPORT_SYMBOL(ksize);
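
/*
 * Illustrative sketch (sizes depend on the configuration): a 100-byte
 * kmalloc() is typically served from the 128-byte kmalloc cache, so the
 * caller may use everything ksize() reports.
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *	avail = ksize(buf);
 *
 * where 'avail' would typically be 128 here.
 */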
4470