// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
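
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the typical pattern a cache user follows. The struct "foo" and its field
 * are hypothetical; the calls are the standard slab interface described
 * above.
 *
 *	struct foo {
 *		int bar;
 *	};
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */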

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>
#include	<linux/sched/task_stack.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"
/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
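/*
 * Concrete example (editorial note; the numbers are hypothetical and
 * derived from the layout above): on a 64-bit machine, where
 * BYTES_PER_WORD == 8, a 32-byte object in a SLAB_RED_ZONE +
 * SLAB_STORE_USER cache might have obj_offset == 8, so bytes 0..7 hold
 * the first redzone word, bytes 8..39 the real object, and the tail of
 * the cachep->size region holds the second redzone word followed by the
 * last-caller word.
 */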
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

#endif

/*
 * Do not go above this order unless no objects fit into the slab or
 * the limit has been overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

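/*
 * Worked example (editorial note): for a cache with size == 256 and an
 * object starting 1024 bytes into the slab's s_mem, obj_to_index()
 * computes reciprocal_divide(1024, reciprocal_value(256)) == 1024 / 256
 * == 4, i.e. the fifth object, without issuing a hardware divide.
 */
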
#define BOOT_CPUCACHE_ENTRIES	1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}

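/*
 * Worked example (editorial note): with PAGE_SIZE == 4096, gfporder == 0,
 * a 100-byte buffer_size and an on-slab freelist of 2-byte indices,
 * num == 4096 / (100 + 2) == 40 objects and *left_over == 4096 % 102
 * == 16 bytes.
 */
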
#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using a fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);
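
/*
 * Usage note (editorial): booting with "slab_max_order=2" on the kernel
 * command line caps slab page orders at 2 (16KB slabs on a 4KB-page
 * system); "noaliencache" above similarly disables the alien caches.
 */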

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
						    node_online_map);
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		/*
		 * The array_cache structures contain pointers to free
		 * objects. However, when such objects are allocated or
		 * transferred to another cache the pointers are not
		 * cleared and they could be counted as valid references
		 * during a kmemleak scan. Therefore, kmemleak must not
		 * scan such objects.
		 */
		kmemleak_no_scan(ac);
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we can avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac->entry[ac->avail++] = objp;
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not reclaim or
 * warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif

static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
{
	struct kmem_cache_node *n;

	/*
	 * Set up the kmem_cache_node for this cpu's node before we can
	 * begin anything. Make sure some other cpu on this node has not
	 * already allocated it.
	 */
	n = get_node(cachep, node);
	if (n) {
		spin_lock_irq(&n->list_lock);
		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
				cachep->num;
		spin_unlock_irq(&n->list_lock);

		return 0;
	}

	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
	if (!n)
		return -ENOMEM;

	kmem_cache_node_init(n);
	n->next_reap = jiffies + REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

	n->free_limit =
		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;

	/*
	 * The kmem_cache_nodes don't come and go as CPUs
	 * come and go.  slab_mutex is sufficient
	 * protection here.
	 */
	cachep->node[node] = n;

	return 0;
}

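/*
 * Worked example (editorial note; the values are hypothetical): on a node
 * with 4 cpus, a cache with batchcount == 16 and num == 30 objects per
 * slab gets free_limit == (1 + 4) * 16 + 30 == 110 free objects before
 * reaping starts returning slabs to the page allocator.
 */
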
#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocates and initializes a kmem_cache_node for the given node on each
 * slab cache, used for either memory or cpu hotplug.  If memory is being
 * hot-added, the kmem_cache_node will be allocated off-node since memory
 * is not yet online for the new node.  When hotplugging memory or a cpu,
 * existing nodes are not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	int ret;
	struct kmem_cache *cachep;

	list_for_each_entry(cachep, &slab_caches, list) {
		ret = init_cache_node(cachep, node, GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int setup_kmem_cache_node(struct kmem_cache *cachep,
				int node, gfp_t gfp, bool force_change)
{
	int ret = -ENOMEM;
	struct kmem_cache_node *n;
	struct array_cache *old_shared = NULL;
	struct array_cache *new_shared = NULL;
	struct alien_cache **new_alien = NULL;
	LIST_HEAD(list);

	if (use_alien_caches) {
		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
		if (!new_alien)
			goto fail;
	}

	if (cachep->shared) {
		new_shared = alloc_arraycache(node,
			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
		if (!new_shared)
			goto fail;
	}

	ret = init_cache_node(cachep, node, gfp);
	if (ret)
		goto fail;

	n = get_node(cachep, node);
	spin_lock_irq(&n->list_lock);
	if (n->shared && force_change) {
		free_block(cachep, n->shared->entry,
				n->shared->avail, node, &list);
		n->shared->avail = 0;
	}

	if (!n->shared || force_change) {
		old_shared = n->shared;
		n->shared = new_shared;
		new_shared = NULL;
	}

	if (!n->alien) {
		n->alien = new_alien;
		new_alien = NULL;
	}

	spin_unlock_irq(&n->list_lock);
	slabs_destroy(cachep, &list);

	/*
	 * To protect lockless access to n->shared during irq disabled context.
	 * If n->shared isn't NULL in irq disabled context, accessing it is
	 * guaranteed to be valid until irq is re-enabled, because it will be
	 * freed after synchronize_sched().
	 */
	if (old_shared && force_change)
		synchronize_sched();

fail:
	kfree(old_shared);
	kfree(new_shared);
	free_alien_cache(new_alien);

	return ret;
}

#ifdef CONFIG_SMP

static void cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n = NULL;
	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &slab_caches, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct alien_cache **alien;
		LIST_HEAD(list);

		n = get_node(cachep, node);
		if (!n)
			continue;

		spin_lock_irq(&n->list_lock);

		/* Free limit for this kmem_cache_node */
		n->free_limit -= cachep->batchcount;

		/* cpu is dead; no one can alloc from it. */
		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
		if (nc) {
			free_block(cachep, nc->entry, nc->avail, node, &list);
			nc->avail = 0;
		}

		if (!cpumask_empty(mask)) {
			spin_unlock_irq(&n->list_lock);
			goto free_slab;
		}

		shared = n->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node, &list);
			n->shared = NULL;
		}

		alien = n->alien;
		n->alien = NULL;

		spin_unlock_irq(&n->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}

free_slab:
		slabs_destroy(cachep, &list);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs; now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		n = get_node(cachep, node);
		if (!n)
			continue;
		drain_freelist(cachep, n, INT_MAX);
	}
}

static int cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	int node = cpu_to_mem(cpu);
	int err;

	/*
	 * We need to do this right at the beginning since
	 * the alloc_arraycache() calls are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_cache_node and not this cpu's kmem_cache_node
	 */
	err = init_cache_node_node(node);
	if (err < 0)
		goto bad;

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &slab_caches, list) {
		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
		if (err)
			goto bad;
	}

	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

int slab_prepare_cpu(unsigned int cpu)
{
	int err;

	mutex_lock(&slab_mutex);
	err = cpuup_prepare(cpu);
	mutex_unlock(&slab_mutex);
	return err;
}

/*
 * This is called for a failed online attempt and for a successful
 * offline.
 *
 * Even if all the cpus of a node are down, we don't free the
 * kmem_cache_node of any cache. This is to avoid a race between cpu_down
 * and a kmalloc allocation from another cpu for memory from the node of
 * the cpu going down.  The kmem_cache_node structure is usually allocated
 * from kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 */
int slab_dead_cpu(unsigned int cpu)
{
	mutex_lock(&slab_mutex);
	cpuup_canceled(cpu);
	mutex_unlock(&slab_mutex);
	return 0;
}
#endif

static int slab_online_cpu(unsigned int cpu)
{
	start_cpu_timer(cpu);
	return 0;
}

static int slab_offline_cpu(unsigned int cpu)
{
	/*
	 * Shutdown cache reaper. Note that the slab_mutex is held so
	 * that if cache_reap() is invoked it cannot do anything
	 * expensive but will only modify reap_work and reschedule the
	 * timer.
	 */
	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
	/* Now the cache_reaper is guaranteed to be not running. */
	per_cpu(slab_reap_work, cpu).work.func = NULL;
	return 0;
}

1131 /*
1132  * Drains freelist for a node on each slab cache, used for memory hot-remove.
1133  * Returns -EBUSY if all objects cannot be drained so that the node is not
1134  * removed.
1135  *
1136  * Must hold slab_mutex.
1137  */
1138 static int __meminit drain_cache_node_node(int node)
1139 {
1140 	struct kmem_cache *cachep;
1141 	int ret = 0;
1142 
1143 	list_for_each_entry(cachep, &slab_caches, list) {
1144 		struct kmem_cache_node *n;
1145 
1146 		n = get_node(cachep, node);
1147 		if (!n)
1148 			continue;
1149 
1150 		drain_freelist(cachep, n, INT_MAX);
1151 
1152 		if (!list_empty(&n->slabs_full) ||
1153 		    !list_empty(&n->slabs_partial)) {
1154 			ret = -EBUSY;
1155 			break;
1156 		}
1157 	}
1158 	return ret;
1159 }
1160 
1161 static int __meminit slab_memory_callback(struct notifier_block *self,
1162 					unsigned long action, void *arg)
1163 {
1164 	struct memory_notify *mnb = arg;
1165 	int ret = 0;
1166 	int nid;
1167 
1168 	nid = mnb->status_change_nid;
1169 	if (nid < 0)
1170 		goto out;
1171 
1172 	switch (action) {
1173 	case MEM_GOING_ONLINE:
1174 		mutex_lock(&slab_mutex);
1175 		ret = init_cache_node_node(nid);
1176 		mutex_unlock(&slab_mutex);
1177 		break;
1178 	case MEM_GOING_OFFLINE:
1179 		mutex_lock(&slab_mutex);
1180 		ret = drain_cache_node_node(nid);
1181 		mutex_unlock(&slab_mutex);
1182 		break;
1183 	case MEM_ONLINE:
1184 	case MEM_OFFLINE:
1185 	case MEM_CANCEL_ONLINE:
1186 	case MEM_CANCEL_OFFLINE:
1187 		break;
1188 	}
1189 out:
1190 	return notifier_from_errno(ret);
1191 }
1192 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1193 
/*
 * swap the static kmem_cache_node with kmalloced memory
 */
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
				int nodeid)
{
	struct kmem_cache_node *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
	BUG_ON(!ptr);

	memcpy(ptr, list, sizeof(struct kmem_cache_node));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->node[nodeid] = ptr;
}

/*
 * For setting up all the kmem_cache_nodes for a cache whose buffer_size is
 * the same as the size of struct kmem_cache_node.
 */
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->node[node] = &init_kmem_cache_node[index + node];
		cachep->node[node]->next_reap = jiffies +
		    REAPTIMEOUT_NODE +
		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
					sizeof(struct rcu_head));
	kmem_cache = &kmem_cache_boot;

	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++)
		kmem_cache_node_init(&init_kmem_cache_node[i]);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory if
	 * not overridden on the command line.
	 */
	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
		slab_max_order = SLAB_MAX_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except kmem_cache itself:
	 *    kmem_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for kmem_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	/* 1) create the kmem_cache */

	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
	create_boot_cache(kmem_cache, "kmem_cache",
		offsetof(struct kmem_cache, node) +
				  nr_node_ids * sizeof(struct kmem_cache_node *),
				  SLAB_HWCACHE_ALIGN);
	list_add(&kmem_cache->list, &slab_caches);
	slab_state = PARTIAL;

	/*
	 * Initialize the caches that provide memory for the kmem_cache_node
	 * structures first.  Without this, further allocations will bug.
	 */
	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
				kmalloc_info[INDEX_NODE].name,
				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
	slab_state = PARTIAL_NODE;
	setup_kmalloc_cache_index_table();

	slab_early_init = 0;

	/* 5) Replace the bootstrap kmem_cache_node */
	{
		int nid;

		for_each_online_node(nid) {
			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);

			init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
		}
	}

	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	slab_state = UP;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&slab_mutex);

	/* Done! */
	slab_state = FULL;

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * per-node list structures.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int ret;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
				slab_online_cpu, slab_offline_cpu);
	WARN_ON(ret < 0);

	/* Done! */
	slab_state = FULL;
	return 0;
}
__initcall(cpucache_init);

static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
#if DEBUG
	struct kmem_cache_node *n;
	unsigned long flags;
	int node;
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;

	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nodeid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %d, order: %d\n",
		cachep->name, cachep->size, cachep->gfporder);

	for_each_kmem_cache_node(cachep, node, n) {
		unsigned long total_slabs, free_slabs, free_objs;

		spin_lock_irqsave(&n->list_lock, flags);
		total_slabs = n->total_slabs;
		free_slabs = n->free_slabs;
		free_objs = n->free_objects;
		spin_unlock_irqrestore(&n->list_lock, flags);

		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
			node, total_slabs - free_slabs, total_slabs,
			(total_slabs * cachep->num) - free_objs,
			total_slabs * cachep->num);
	}
#endif
}

/*
 * Interface to system's page allocator. No need to hold the
 * kmem_cache_node ->list_lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
								int nodeid)
{
	struct page *page;
	int nr_pages;

	flags |= cachep->allocflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
	if (!page) {
		slab_out_of_memory(cachep, flags, nodeid);
		return NULL;
	}

	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
		__free_pages(page, cachep->gfporder);
		return NULL;
	}

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
	else
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);

	__SetPageSlab(page);
	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

		if (cachep->ctor)
			kmemcheck_mark_uninitialized_pages(page, nr_pages);
		else
			kmemcheck_mark_unallocated_pages(page, nr_pages);
	}

	return page;
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;
	unsigned long nr_freed = (1 << order);

	kmemcheck_free_shadow(page, order);

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
	else
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	memcg_uncharge_slab(page, order, cachep);
	__free_pages(page, order);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct kmem_cache *cachep;
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	cachep = page->slab_cache;

	kmem_freepages(cachep, page);
}

#if DEBUG
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
		(cachep->size % PAGE_SIZE) == 0)
		return true;

	return false;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = cachep->object_size;

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}

static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	if (caller)
		store_stackinfo(cachep, objp, caller);

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				int map, unsigned long caller) {}

#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = cachep->object_size;
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	pr_err("%03x: ", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
	}
	print_hex_dump(KERN_CONT, "", 0, 16, 1,
			&data[offset], limit, 1);

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
			pr_err("Run memtest86+ or a similar memory test tool.\n");
#else
			pr_err("Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		pr_err("Redzone: 0x%llx/0x%llx\n",
		       *dbg_redzone1(cachep, objp),
		       *dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		pr_err("Last user: [<%p>](%pSR)\n",
		       *dbg_userword(cachep, objp),
		       *dbg_userword(cachep, objp));
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	if (is_debug_pagealloc_cache(cachep))
		return;

	realobj = (char *)objp + obj_offset(cachep);
	size = cachep->object_size;

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch! */
			/* Print header */
			if (lines == 0) {
				pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
				       print_tainted(), cachep->name,
				       realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct page *page = virt_to_head_page(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, page, objp);
		if (objnr) {
			objp = index_to_obj(cachep, page, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, page, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			pr_err("Next obj: start=%p, len=%d\n", realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
	int i;

	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
		poison_obj(cachep, page->freelist - obj_offset(cachep),
			POISON_FREE);
	}

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, page, i);

		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
			slab_kernel_map(cachep, objp, 1, 0);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep,
						struct page *page)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the system.
 * Before calling, the slab page must have been unlinked from the cache. The
 * kmem_cache_node ->list_lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on, we don't use freelist
	 * although the actual page can be freed in rcu context
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
				size_t size, unsigned long flags)
{
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		num = cache_estimate(gfporder, size, flags, &remainder);
		if (!num)
			continue;

		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
		if (num > SLAB_OBJ_MAX_NUM)
			break;

		if (flags & CFLGS_OFF_SLAB) {
			struct kmem_cache *freelist_cache;
			size_t freelist_size;

			freelist_size = num * sizeof(freelist_idx_t);
			freelist_cache = kmalloc_slab(freelist_size, 0u);
			if (!freelist_cache)
				continue;

			/*
			 * Needed to avoid possible looping condition
			 * in cache_grow_begin()
			 */
			if (OFF_SLAB(freelist_cache))
				continue;

			/* check if off slab has enough benefit */
			if (freelist_cache->size > cachep->size / 2)
				continue;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * A large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (gfporder >= slab_max_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}

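/*
 * Worked example (editorial note): the fragmentation check above accepts
 * an order once left_over * 8 <= PAGE_SIZE << gfporder, i.e. at most
 * 12.5% of the slab may be wasted. For a 4096-byte page at order 0, up
 * to 512 left-over bytes are tolerated.
 */
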
1831 static struct array_cache __percpu *alloc_kmem_cache_cpus(
1832 		struct kmem_cache *cachep, int entries, int batchcount)
1833 {
1834 	int cpu;
1835 	size_t size;
1836 	struct array_cache __percpu *cpu_cache;
1837 
1838 	size = sizeof(void *) * entries + sizeof(struct array_cache);
1839 	cpu_cache = __alloc_percpu(size, sizeof(void *));
1840 
1841 	if (!cpu_cache)
1842 		return NULL;
1843 
1844 	for_each_possible_cpu(cpu) {
1845 		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1846 				entries, batchcount);
1847 	}
1848 
1849 	return cpu_cache;
1850 }
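/*
 * Sizing sketch (illustrative, assuming a 64-bit build): the allocation
 * above reserves the array_cache header plus a flexible tail of object
 * pointers, e.g. for entries = 120:
 *
 *	size = sizeof(void *) * 120 + sizeof(struct array_cache);
 *
 * i.e. 960 bytes of entry slots plus the header, replicated per cpu by
 * __alloc_percpu() with pointer alignment.
 */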
1851 
1852 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1853 {
1854 	if (slab_state >= FULL)
1855 		return enable_cpucache(cachep, gfp);
1856 
1857 	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1858 	if (!cachep->cpu_cache)
1859 		return 1;
1860 
1861 	if (slab_state == DOWN) {
1862 		/* Creation of first cache (kmem_cache). */
1863 		set_up_node(kmem_cache, CACHE_CACHE);
1864 	} else if (slab_state == PARTIAL) {
1865 		/* For kmem_cache_node */
1866 		set_up_node(cachep, SIZE_NODE);
1867 	} else {
1868 		int node;
1869 
1870 		for_each_online_node(node) {
1871 			cachep->node[node] = kmalloc_node(
1872 				sizeof(struct kmem_cache_node), gfp, node);
1873 			BUG_ON(!cachep->node[node]);
1874 			kmem_cache_node_init(cachep->node[node]);
1875 		}
1876 	}
1877 
1878 	cachep->node[numa_mem_id()]->next_reap =
1879 			jiffies + REAPTIMEOUT_NODE +
1880 			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1881 
1882 	cpu_cache_get(cachep)->avail = 0;
1883 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1884 	cpu_cache_get(cachep)->batchcount = 1;
1885 	cpu_cache_get(cachep)->touched = 0;
1886 	cachep->batchcount = 1;
1887 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1888 	return 0;
1889 }
1890 
1891 unsigned long kmem_cache_flags(unsigned long object_size,
1892 	unsigned long flags, const char *name,
1893 	void (*ctor)(void *))
1894 {
1895 	return flags;
1896 }
1897 
1898 struct kmem_cache *
1899 __kmem_cache_alias(const char *name, size_t size, size_t align,
1900 		   unsigned long flags, void (*ctor)(void *))
1901 {
1902 	struct kmem_cache *cachep;
1903 
1904 	cachep = find_mergeable(size, align, flags, name, ctor);
1905 	if (cachep) {
1906 		cachep->refcount++;
1907 
1908 		/*
1909 		 * Adjust the object sizes so that we clear
1910 		 * the complete object on kzalloc.
1911 		 */
1912 		cachep->object_size = max_t(int, cachep->object_size, size);
1913 	}
1914 	return cachep;
1915 }
1916 
1917 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1918 			size_t size, unsigned long flags)
1919 {
1920 	size_t left;
1921 
1922 	cachep->num = 0;
1923 
1924 	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1925 		return false;
1926 
1927 	left = calculate_slab_order(cachep, size,
1928 			flags | CFLGS_OBJFREELIST_SLAB);
1929 	if (!cachep->num)
1930 		return false;
1931 
1932 	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1933 		return false;
1934 
1935 	cachep->colour = left / cachep->colour_off;
1936 
1937 	return true;
1938 }
1939 
1940 static bool set_off_slab_cache(struct kmem_cache *cachep,
1941 			size_t size, unsigned long flags)
1942 {
1943 	size_t left;
1944 
1945 	cachep->num = 0;
1946 
1947 	/*
1948 	 * Always use on-slab management when SLAB_NOLEAKTRACE
1949 	 * to avoid recursive calls into kmemleak.
1950 	 */
1951 	if (flags & SLAB_NOLEAKTRACE)
1952 		return false;
1953 
1954 	/*
1955 	 * Size is large, assume best to place the slab management obj
1956 	 * off-slab (should allow better packing of objs).
1957 	 */
1958 	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1959 	if (!cachep->num)
1960 		return false;
1961 
1962 	/*
1963 	 * If the slab has been placed off-slab, and we have enough space then
1964 	 * move it on-slab. This is at the expense of any extra colouring.
1965 	 */
1966 	if (left >= cachep->num * sizeof(freelist_idx_t))
1967 		return false;
1968 
1969 	cachep->colour = left / cachep->colour_off;
1970 
1971 	return true;
1972 }
1973 
1974 static bool set_on_slab_cache(struct kmem_cache *cachep,
1975 			size_t size, unsigned long flags)
1976 {
1977 	size_t left;
1978 
1979 	cachep->num = 0;
1980 
1981 	left = calculate_slab_order(cachep, size, flags);
1982 	if (!cachep->num)
1983 		return false;
1984 
1985 	cachep->colour = left / cachep->colour_off;
1986 
1987 	return true;
1988 }
1989 
1990 /**
1991  * __kmem_cache_create - Create a cache.
1992  * @cachep: cache management descriptor
1993  * @flags: SLAB flags
1994  *
1995  * Returns a ptr to the cache on success, NULL on failure.
1996  * Cannot be called within an interrupt, but can be interrupted.
1997  * The @ctor is run when new pages are allocated by the cache.
1998  *
1999  * The flags are
2000  *
2001  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2002  * to catch references to uninitialised memory.
2003  *
2004  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2005  * for buffer overruns.
2006  *
2007  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2008  * cacheline.  This can be beneficial if you're counting cycles as closely
2009  * as davem.
2010  */
2011 int
2012 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2013 {
2014 	size_t ralign = BYTES_PER_WORD;
2015 	gfp_t gfp;
2016 	int err;
2017 	size_t size = cachep->size;
2018 
2019 #if DEBUG
2020 #if FORCED_DEBUG
2021 	/*
2022 	 * Enable redzoning and last user accounting, except for caches with
2023 	 * large objects, if the increased size would increase the object size
2024 	 * above the next power of two: caches with object sizes just above a
2025 	 * power of two have a significant amount of internal fragmentation.
2026 	 */
2027 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2028 						2 * sizeof(unsigned long long)))
2029 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2030 	if (!(flags & SLAB_TYPESAFE_BY_RCU))
2031 		flags |= SLAB_POISON;
2032 #endif
2033 #endif
2034 
2035 	/*
2036 	 * Check that size is in terms of words.  This is needed to avoid
2037 	 * unaligned accesses for some archs when redzoning is used, and makes
2038 	 * sure any on-slab bufctl's are also correctly aligned.
2039 	 */
2040 	size = ALIGN(size, BYTES_PER_WORD);
2041 
2042 	if (flags & SLAB_RED_ZONE) {
2043 		ralign = REDZONE_ALIGN;
2044 		/* If redzoning, ensure that the second redzone is suitably
2045 		 * aligned, by adjusting the object size accordingly. */
2046 		size = ALIGN(size, REDZONE_ALIGN);
2047 	}
2048 
2049 	/* 3) caller mandated alignment */
2050 	if (ralign < cachep->align) {
2051 		ralign = cachep->align;
2052 	}
2053 	/* disable debug if necessary */
2054 	if (ralign > __alignof__(unsigned long long))
2055 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2056 	/*
2057 	 * 4) Store it.
2058 	 */
2059 	cachep->align = ralign;
2060 	cachep->colour_off = cache_line_size();
2061 	/* Offset must be a multiple of the alignment. */
2062 	if (cachep->colour_off < cachep->align)
2063 		cachep->colour_off = cachep->align;
2064 
2065 	if (slab_is_available())
2066 		gfp = GFP_KERNEL;
2067 	else
2068 		gfp = GFP_NOWAIT;
2069 
2070 #if DEBUG
2071 
2072 	/*
2073 	 * Both debugging options require word-alignment which is calculated
2074 	 * into align above.
2075 	 */
2076 	if (flags & SLAB_RED_ZONE) {
2077 		/* add space for red zone words */
2078 		cachep->obj_offset += sizeof(unsigned long long);
2079 		size += 2 * sizeof(unsigned long long);
2080 	}
2081 	if (flags & SLAB_STORE_USER) {
2082 		/* user store requires one word storage behind the end of
2083 		 * the real object. But if the second red zone needs to be
2084 		 * aligned to 64 bits, we must allow that much space.
2085 		 */
2086 		if (flags & SLAB_RED_ZONE)
2087 			size += REDZONE_ALIGN;
2088 		else
2089 			size += BYTES_PER_WORD;
2090 	}
2091 #endif
2092 
2093 	kasan_cache_create(cachep, &size, &flags);
2094 
2095 	size = ALIGN(size, cachep->align);
2096 	/*
2097 	 * We should restrict the number of objects in a slab to implement
2098 	 * a byte sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2099 	 */
2100 	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2101 		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2102 
2103 #if DEBUG
2104 	/*
2105 	 * To activate debug pagealloc, off-slab management is a necessary
2106 	 * requirement. In the early phase of initialization, the small sized
2107 	 * slabs don't get initialized yet, so off-slab would not be possible
2108 	 * then. So we need to check size >= 256; this guarantees that all the
2109 	 * necessary small sized slabs are initialized in the current sequence.
2110 	 */
2111 	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2112 		size >= 256 && cachep->object_size > cache_line_size()) {
2113 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2114 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2115 
2116 			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2117 				flags |= CFLGS_OFF_SLAB;
2118 				cachep->obj_offset += tmp_size - size;
2119 				size = tmp_size;
2120 				goto done;
2121 			}
2122 		}
2123 	}
2124 #endif
2125 
2126 	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2127 		flags |= CFLGS_OBJFREELIST_SLAB;
2128 		goto done;
2129 	}
2130 
2131 	if (set_off_slab_cache(cachep, size, flags)) {
2132 		flags |= CFLGS_OFF_SLAB;
2133 		goto done;
2134 	}
2135 
2136 	if (set_on_slab_cache(cachep, size, flags))
2137 		goto done;
2138 
2139 	return -E2BIG;
2140 
2141 done:
2142 	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2143 	cachep->flags = flags;
2144 	cachep->allocflags = __GFP_COMP;
2145 	if (flags & SLAB_CACHE_DMA)
2146 		cachep->allocflags |= GFP_DMA;
2147 	cachep->size = size;
2148 	cachep->reciprocal_buffer_size = reciprocal_value(size);
2149 
2150 #if DEBUG
2151 	/*
2152 	 * If we're going to use the generic kernel_map_pages()
2153 	 * poisoning, then it's going to smash the contents of
2154 	 * the redzone and userword anyhow, so switch them off.
2155 	 */
2156 	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2157 		(cachep->flags & SLAB_POISON) &&
2158 		is_debug_pagealloc_cache(cachep))
2159 		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2160 #endif
2161 
2162 	if (OFF_SLAB(cachep)) {
2163 		cachep->freelist_cache =
2164 			kmalloc_slab(cachep->freelist_size, 0u);
2165 	}
2166 
2167 	err = setup_cpu_cache(cachep, gfp);
2168 	if (err) {
2169 		__kmem_cache_release(cachep);
2170 		return err;
2171 	}
2172 
2173 	return 0;
2174 }
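/*
 * Illustrative usage sketch (the 'struct foo' and 'foo_ctor' names are
 * hypothetical, not part of this file): the public API that eventually
 * lands in __kmem_cache_create() above is typically driven like this:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */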
2175 
2176 #if DEBUG
2177 static void check_irq_off(void)
2178 {
2179 	BUG_ON(!irqs_disabled());
2180 }
2181 
2182 static void check_irq_on(void)
2183 {
2184 	BUG_ON(irqs_disabled());
2185 }
2186 
2187 static void check_mutex_acquired(void)
2188 {
2189 	BUG_ON(!mutex_is_locked(&slab_mutex));
2190 }
2191 
2192 static void check_spinlock_acquired(struct kmem_cache *cachep)
2193 {
2194 #ifdef CONFIG_SMP
2195 	check_irq_off();
2196 	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2197 #endif
2198 }
2199 
2200 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2201 {
2202 #ifdef CONFIG_SMP
2203 	check_irq_off();
2204 	assert_spin_locked(&get_node(cachep, node)->list_lock);
2205 #endif
2206 }
2207 
2208 #else
2209 #define check_irq_off()	do { } while(0)
2210 #define check_irq_on()	do { } while(0)
2211 #define check_mutex_acquired()	do { } while(0)
2212 #define check_spinlock_acquired(x) do { } while(0)
2213 #define check_spinlock_acquired_node(x, y) do { } while(0)
2214 #endif
2215 
2216 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2217 				int node, bool free_all, struct list_head *list)
2218 {
2219 	int tofree;
2220 
2221 	if (!ac || !ac->avail)
2222 		return;
2223 
2224 	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2225 	if (tofree > ac->avail)
2226 		tofree = (ac->avail + 1) / 2;
2227 
2228 	free_block(cachep, ac->entry, tofree, node, list);
2229 	ac->avail -= tofree;
2230 	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2231 }
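/*
 * Worked example of the batching above (illustrative): with limit = 120
 * and avail = 3, a partial drain computes tofree = (120 + 4) / 5 = 24;
 * since 24 > 3 this is clamped to (3 + 1) / 2 = 2, so two objects are
 * returned to the node and the remaining one is shifted to the front.
 */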
2232 
2233 static void do_drain(void *arg)
2234 {
2235 	struct kmem_cache *cachep = arg;
2236 	struct array_cache *ac;
2237 	int node = numa_mem_id();
2238 	struct kmem_cache_node *n;
2239 	LIST_HEAD(list);
2240 
2241 	check_irq_off();
2242 	ac = cpu_cache_get(cachep);
2243 	n = get_node(cachep, node);
2244 	spin_lock(&n->list_lock);
2245 	free_block(cachep, ac->entry, ac->avail, node, &list);
2246 	spin_unlock(&n->list_lock);
2247 	slabs_destroy(cachep, &list);
2248 	ac->avail = 0;
2249 }
2250 
2251 static void drain_cpu_caches(struct kmem_cache *cachep)
2252 {
2253 	struct kmem_cache_node *n;
2254 	int node;
2255 	LIST_HEAD(list);
2256 
2257 	on_each_cpu(do_drain, cachep, 1);
2258 	check_irq_on();
2259 	for_each_kmem_cache_node(cachep, node, n)
2260 		if (n->alien)
2261 			drain_alien_cache(cachep, n->alien);
2262 
2263 	for_each_kmem_cache_node(cachep, node, n) {
2264 		spin_lock_irq(&n->list_lock);
2265 		drain_array_locked(cachep, n->shared, node, true, &list);
2266 		spin_unlock_irq(&n->list_lock);
2267 
2268 		slabs_destroy(cachep, &list);
2269 	}
2270 }
2271 
2272 /*
2273  * Remove slabs from the list of free slabs.
2274  * Specify the number of slabs to drain in tofree.
2275  *
2276  * Returns the actual number of slabs released.
2277  */
2278 static int drain_freelist(struct kmem_cache *cache,
2279 			struct kmem_cache_node *n, int tofree)
2280 {
2281 	struct list_head *p;
2282 	int nr_freed;
2283 	struct page *page;
2284 
2285 	nr_freed = 0;
2286 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2287 
2288 		spin_lock_irq(&n->list_lock);
2289 		p = n->slabs_free.prev;
2290 		if (p == &n->slabs_free) {
2291 			spin_unlock_irq(&n->list_lock);
2292 			goto out;
2293 		}
2294 
2295 		page = list_entry(p, struct page, lru);
2296 		list_del(&page->lru);
2297 		n->free_slabs--;
2298 		n->total_slabs--;
2299 		/*
2300 		 * Safe to drop the lock. The slab is no longer linked
2301 		 * to the cache.
2302 		 */
2303 		n->free_objects -= cache->num;
2304 		spin_unlock_irq(&n->list_lock);
2305 		slab_destroy(cache, page);
2306 		nr_freed++;
2307 	}
2308 out:
2309 	return nr_freed;
2310 }
2311 
2312 int __kmem_cache_shrink(struct kmem_cache *cachep)
2313 {
2314 	int ret = 0;
2315 	int node;
2316 	struct kmem_cache_node *n;
2317 
2318 	drain_cpu_caches(cachep);
2319 
2320 	check_irq_on();
2321 	for_each_kmem_cache_node(cachep, node, n) {
2322 		drain_freelist(cachep, n, INT_MAX);
2323 
2324 		ret += !list_empty(&n->slabs_full) ||
2325 			!list_empty(&n->slabs_partial);
2326 	}
2327 	return (ret ? 1 : 0);
2328 }
2329 
2330 #ifdef CONFIG_MEMCG
2331 void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2332 {
2333 	__kmem_cache_shrink(cachep);
2334 }
2335 #endif
2336 
2337 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2338 {
2339 	return __kmem_cache_shrink(cachep);
2340 }
2341 
2342 void __kmem_cache_release(struct kmem_cache *cachep)
2343 {
2344 	int i;
2345 	struct kmem_cache_node *n;
2346 
2347 	cache_random_seq_destroy(cachep);
2348 
2349 	free_percpu(cachep->cpu_cache);
2350 
2351 	/* NUMA: free the node structures */
2352 	for_each_kmem_cache_node(cachep, i, n) {
2353 		kfree(n->shared);
2354 		free_alien_cache(n->alien);
2355 		kfree(n);
2356 		cachep->node[i] = NULL;
2357 	}
2358 }
2359 
2360 /*
2361  * Get the memory for a slab management obj.
2362  *
2363  * For a slab cache when the slab descriptor is off-slab, the
2364  * slab descriptor can't come from the same cache which is being created,
2365  * because if that were the case, it would mean we defer the creation of
2366  * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2367  * And we would eventually call down to __kmem_cache_create(), which
2368  * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2369  * This is a "chicken-and-egg" problem.
2370  *
2371  * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2372  * which are all initialized during kmem_cache_init().
2373  */
2374 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2375 				   struct page *page, int colour_off,
2376 				   gfp_t local_flags, int nodeid)
2377 {
2378 	void *freelist;
2379 	void *addr = page_address(page);
2380 
2381 	page->s_mem = addr + colour_off;
2382 	page->active = 0;
2383 
2384 	if (OBJFREELIST_SLAB(cachep))
2385 		freelist = NULL;
2386 	else if (OFF_SLAB(cachep)) {
2387 		/* Slab management obj is off-slab. */
2388 		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2389 					      local_flags, nodeid);
2390 		if (!freelist)
2391 			return NULL;
2392 	} else {
2393 		/* We will use the last bytes of the slab for the freelist */
2394 		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2395 				cachep->freelist_size;
2396 	}
2397 
2398 	return freelist;
2399 }
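/*
 * On-slab layout sketch (illustrative, assuming order 0, 4096-byte pages
 * and one-byte freelist indices): with num = 15 objects, the freelist
 * occupies the last 15 bytes of the page, i.e.
 *
 *	freelist = addr + 4096 - 15;	 (addr + 4081)
 *
 * while objects start at addr + colour_off, leaving the colour padding
 * and any remainder in between.
 */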
2400 
2401 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2402 {
2403 	return ((freelist_idx_t *)page->freelist)[idx];
2404 }
2405 
2406 static inline void set_free_obj(struct page *page,
2407 					unsigned int idx, freelist_idx_t val)
2408 {
2409 	((freelist_idx_t *)(page->freelist))[idx] = val;
2410 }
2411 
2412 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2413 {
2414 #if DEBUG
2415 	int i;
2416 
2417 	for (i = 0; i < cachep->num; i++) {
2418 		void *objp = index_to_obj(cachep, page, i);
2419 
2420 		if (cachep->flags & SLAB_STORE_USER)
2421 			*dbg_userword(cachep, objp) = NULL;
2422 
2423 		if (cachep->flags & SLAB_RED_ZONE) {
2424 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2425 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2426 		}
2427 		/*
2428 		 * Constructors are not allowed to allocate memory from the same
2429 		 * cache which they are a constructor for.  Otherwise, deadlock.
2430 		 * They must also be threaded.
2431 		 */
2432 		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2433 			kasan_unpoison_object_data(cachep,
2434 						   objp + obj_offset(cachep));
2435 			cachep->ctor(objp + obj_offset(cachep));
2436 			kasan_poison_object_data(
2437 				cachep, objp + obj_offset(cachep));
2438 		}
2439 
2440 		if (cachep->flags & SLAB_RED_ZONE) {
2441 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2442 				slab_error(cachep, "constructor overwrote the end of an object");
2443 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2444 				slab_error(cachep, "constructor overwrote the start of an object");
2445 		}
2446 		/* need to poison the objs? */
2447 		if (cachep->flags & SLAB_POISON) {
2448 			poison_obj(cachep, objp, POISON_FREE);
2449 			slab_kernel_map(cachep, objp, 0, 0);
2450 		}
2451 	}
2452 #endif
2453 }
2454 
2455 #ifdef CONFIG_SLAB_FREELIST_RANDOM
2456 /* Hold information during a freelist initialization */
2457 union freelist_init_state {
2458 	struct {
2459 		unsigned int pos;
2460 		unsigned int *list;
2461 		unsigned int count;
2462 	};
2463 	struct rnd_state rnd_state;
2464 };
2465 
2466 /*
2467  * Initialize the state based on the randomization method available.
2468  * Return true if the pre-computed list is available, false otherwise.
2469  */
2470 static bool freelist_state_initialize(union freelist_init_state *state,
2471 				struct kmem_cache *cachep,
2472 				unsigned int count)
2473 {
2474 	bool ret;
2475 	unsigned int rand;
2476 
2477 	/* Use best entropy available to define a random shift */
2478 	rand = get_random_int();
2479 
2480 	/* Use a random state if the pre-computed list is not available */
2481 	if (!cachep->random_seq) {
2482 		prandom_seed_state(&state->rnd_state, rand);
2483 		ret = false;
2484 	} else {
2485 		state->list = cachep->random_seq;
2486 		state->count = count;
2487 		state->pos = rand % count;
2488 		ret = true;
2489 	}
2490 	return ret;
2491 }
2492 
2493 /* Get the next entry on the list and randomize it using a random shift */
2494 static freelist_idx_t next_random_slot(union freelist_init_state *state)
2495 {
2496 	if (state->pos >= state->count)
2497 		state->pos = 0;
2498 	return state->list[state->pos++];
2499 }
2500 
2501 /* Swap two freelist entries */
2502 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2503 {
2504 	swap(((freelist_idx_t *)page->freelist)[a],
2505 		((freelist_idx_t *)page->freelist)[b]);
2506 }
2507 
2508 /*
2509  * Shuffle the freelist initialization state based on pre-computed lists.
2510  * Return true if the list was successfully shuffled, false otherwise.
2511  */
2512 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2513 {
2514 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2515 	union freelist_init_state state;
2516 	bool precomputed;
2517 
2518 	if (count < 2)
2519 		return false;
2520 
2521 	precomputed = freelist_state_initialize(&state, cachep, count);
2522 
2523 	/* Take a random entry as the objfreelist */
2524 	if (OBJFREELIST_SLAB(cachep)) {
2525 		if (!precomputed)
2526 			objfreelist = count - 1;
2527 		else
2528 			objfreelist = next_random_slot(&state);
2529 		page->freelist = index_to_obj(cachep, page, objfreelist) +
2530 						obj_offset(cachep);
2531 		count--;
2532 	}
2533 
2534 	/*
2535 	 * On early boot, generate the list dynamically.
2536 	 * Later use a pre-computed list for speed.
2537 	 */
2538 	if (!precomputed) {
2539 		for (i = 0; i < count; i++)
2540 			set_free_obj(page, i, i);
2541 
2542 		/* Fisher-Yates shuffle */
2543 		for (i = count - 1; i > 0; i--) {
2544 			rand = prandom_u32_state(&state.rnd_state);
2545 			rand %= (i + 1);
2546 			swap_free_obj(page, i, rand);
2547 		}
2548 	} else {
2549 		for (i = 0; i < count; i++)
2550 			set_free_obj(page, i, next_random_slot(&state));
2551 	}
2552 
2553 	if (OBJFREELIST_SLAB(cachep))
2554 		set_free_obj(page, cachep->num - 1, objfreelist);
2555 
2556 	return true;
2557 }
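/*
 * Worked example (illustrative): for a 4-object slab without a
 * pre-computed list, the freelist starts as {0, 1, 2, 3} and the
 * Fisher-Yates pass above might swap index 3 with 1, then 2 with 0,
 * then 1 with itself, yielding e.g. {2, 3, 0, 1} - a uniform random
 * permutation of the allocation order.
 */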
2558 #else
2559 static inline bool shuffle_freelist(struct kmem_cache *cachep,
2560 				struct page *page)
2561 {
2562 	return false;
2563 }
2564 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
2565 
2566 static void cache_init_objs(struct kmem_cache *cachep,
2567 			    struct page *page)
2568 {
2569 	int i;
2570 	void *objp;
2571 	bool shuffled;
2572 
2573 	cache_init_objs_debug(cachep, page);
2574 
2575 	/* Try to randomize the freelist if enabled */
2576 	shuffled = shuffle_freelist(cachep, page);
2577 
2578 	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2579 		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2580 						obj_offset(cachep);
2581 	}
2582 
2583 	for (i = 0; i < cachep->num; i++) {
2584 		objp = index_to_obj(cachep, page, i);
2585 		kasan_init_slab_obj(cachep, objp);
2586 
2587 		/* constructor could break poison info */
2588 		if (DEBUG == 0 && cachep->ctor) {
2589 			kasan_unpoison_object_data(cachep, objp);
2590 			cachep->ctor(objp);
2591 			kasan_poison_object_data(cachep, objp);
2592 		}
2593 
2594 		if (!shuffled)
2595 			set_free_obj(page, i, i);
2596 	}
2597 }
2598 
2599 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2600 {
2601 	void *objp;
2602 
2603 	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2604 	page->active++;
2605 
2606 #if DEBUG
2607 	if (cachep->flags & SLAB_STORE_USER)
2608 		set_store_user_dirty(cachep);
2609 #endif
2610 
2611 	return objp;
2612 }
2613 
2614 static void slab_put_obj(struct kmem_cache *cachep,
2615 			struct page *page, void *objp)
2616 {
2617 	unsigned int objnr = obj_to_index(cachep, page, objp);
2618 #if DEBUG
2619 	unsigned int i;
2620 
2621 	/* Verify double free bug */
2622 	for (i = page->active; i < cachep->num; i++) {
2623 		if (get_free_obj(page, i) == objnr) {
2624 			pr_err("slab: double free detected in cache '%s', objp %p\n",
2625 			       cachep->name, objp);
2626 			BUG();
2627 		}
2628 	}
2629 #endif
2630 	page->active--;
2631 	if (!page->freelist)
2632 		page->freelist = objp + obj_offset(cachep);
2633 
2634 	set_free_obj(page, page->active, objnr);
2635 }
2636 
2637 /*
2638  * Map pages beginning at addr to the given cache and slab. This is required
2639  * for the slab allocator to be able to look up the cache and slab of a
2640  * virtual address for kfree, ksize, and slab debugging.
2641  */
2642 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2643 			   void *freelist)
2644 {
2645 	page->slab_cache = cache;
2646 	page->freelist = freelist;
2647 }
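/*
 * Reverse-lookup sketch: with the mapping above in place, the free paths
 * can recover the owning cache from a bare pointer, roughly:
 *
 *	struct page *page = virt_to_head_page(objp);
 *	struct kmem_cache *c = page->slab_cache;
 *
 * which is what virt_to_cache() does for kfree() below.
 */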
2648 
2649 /*
2650  * Grow (by 1) the number of slabs within a cache.  This is called by
2651  * kmem_cache_alloc() when there are no active objs left in a cache.
2652  */
2653 static struct page *cache_grow_begin(struct kmem_cache *cachep,
2654 				gfp_t flags, int nodeid)
2655 {
2656 	void *freelist;
2657 	size_t offset;
2658 	gfp_t local_flags;
2659 	int page_node;
2660 	struct kmem_cache_node *n;
2661 	struct page *page;
2662 
2663 	/*
2664 	 * Be lazy and only check for valid flags here, keeping it out of the
2665 	 * critical path in kmem_cache_alloc().
2666 	 */
2667 	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2668 		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2669 		flags &= ~GFP_SLAB_BUG_MASK;
2670 		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2671 				invalid_mask, &invalid_mask, flags, &flags);
2672 		dump_stack();
2673 	}
2674 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2675 
2676 	check_irq_off();
2677 	if (gfpflags_allow_blocking(local_flags))
2678 		local_irq_enable();
2679 
2680 	/*
2681 	 * Get mem for the objs.  Attempt to allocate a physical page from
2682 	 * 'nodeid'.
2683 	 */
2684 	page = kmem_getpages(cachep, local_flags, nodeid);
2685 	if (!page)
2686 		goto failed;
2687 
2688 	page_node = page_to_nid(page);
2689 	n = get_node(cachep, page_node);
2690 
2691 	/* Get colour for the slab, and calculate the next value. */
2692 	n->colour_next++;
2693 	if (n->colour_next >= cachep->colour)
2694 		n->colour_next = 0;
2695 
2696 	offset = n->colour_next;
2697 	if (offset >= cachep->colour)
2698 		offset = 0;
2699 
2700 	offset *= cachep->colour_off;
2701 
2702 	/* Get slab management. */
2703 	freelist = alloc_slabmgmt(cachep, page, offset,
2704 			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2705 	if (OFF_SLAB(cachep) && !freelist)
2706 		goto opps1;
2707 
2708 	slab_map_pages(cachep, page, freelist);
2709 
2710 	kasan_poison_slab(page);
2711 	cache_init_objs(cachep, page);
2712 
2713 	if (gfpflags_allow_blocking(local_flags))
2714 		local_irq_disable();
2715 
2716 	return page;
2717 
2718 opps1:
2719 	kmem_freepages(cachep, page);
2720 failed:
2721 	if (gfpflags_allow_blocking(local_flags))
2722 		local_irq_disable();
2723 	return NULL;
2724 }
2725 
2726 static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2727 {
2728 	struct kmem_cache_node *n;
2729 	void *list = NULL;
2730 
2731 	check_irq_off();
2732 
2733 	if (!page)
2734 		return;
2735 
2736 	INIT_LIST_HEAD(&page->lru);
2737 	n = get_node(cachep, page_to_nid(page));
2738 
2739 	spin_lock(&n->list_lock);
2740 	n->total_slabs++;
2741 	if (!page->active) {
2742 		list_add_tail(&page->lru, &(n->slabs_free));
2743 		n->free_slabs++;
2744 	} else
2745 		fixup_slab_list(cachep, n, page, &list);
2746 
2747 	STATS_INC_GROWN(cachep);
2748 	n->free_objects += cachep->num - page->active;
2749 	spin_unlock(&n->list_lock);
2750 
2751 	fixup_objfreelist_debug(cachep, &list);
2752 }
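/*
 * Pairing sketch: callers in the allocation slow paths use the two
 * halves like this (see cache_alloc_refill() and fallback_alloc()):
 *
 *	page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
 *	...optionally grab objects from the fresh, still unlinked slab...
 *	cache_grow_end(cachep, page);	(a NULL page is tolerated)
 */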
2753 
2754 #if DEBUG
2755 
2756 /*
2757  * Perform extra freeing checks:
2758  * - detect bad pointers.
2759  * - check POISON/RED_ZONE markers.
2760  */
2761 static void kfree_debugcheck(const void *objp)
2762 {
2763 	if (!virt_addr_valid(objp)) {
2764 		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2765 		       (unsigned long)objp);
2766 		BUG();
2767 	}
2768 }
2769 
2770 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2771 {
2772 	unsigned long long redzone1, redzone2;
2773 
2774 	redzone1 = *dbg_redzone1(cache, obj);
2775 	redzone2 = *dbg_redzone2(cache, obj);
2776 
2777 	/*
2778 	 * Redzone is ok.
2779 	 */
2780 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2781 		return;
2782 
2783 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2784 		slab_error(cache, "double free detected");
2785 	else
2786 		slab_error(cache, "memory outside object was overwritten");
2787 
2788 	pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2789 	       obj, redzone1, redzone2);
2790 }
2791 
2792 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2793 				   unsigned long caller)
2794 {
2795 	unsigned int objnr;
2796 	struct page *page;
2797 
2798 	BUG_ON(virt_to_cache(objp) != cachep);
2799 
2800 	objp -= obj_offset(cachep);
2801 	kfree_debugcheck(objp);
2802 	page = virt_to_head_page(objp);
2803 
2804 	if (cachep->flags & SLAB_RED_ZONE) {
2805 		verify_redzone_free(cachep, objp);
2806 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2807 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2808 	}
2809 	if (cachep->flags & SLAB_STORE_USER) {
2810 		set_store_user_dirty(cachep);
2811 		*dbg_userword(cachep, objp) = (void *)caller;
2812 	}
2813 
2814 	objnr = obj_to_index(cachep, page, objp);
2815 
2816 	BUG_ON(objnr >= cachep->num);
2817 	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2818 
2819 	if (cachep->flags & SLAB_POISON) {
2820 		poison_obj(cachep, objp, POISON_FREE);
2821 		slab_kernel_map(cachep, objp, 0, caller);
2822 	}
2823 	return objp;
2824 }
2825 
2826 #else
2827 #define kfree_debugcheck(x) do { } while(0)
2828 #define cache_free_debugcheck(x,objp,z) (objp)
2829 #endif
2830 
2831 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2832 						void **list)
2833 {
2834 #if DEBUG
2835 	void *next = *list;
2836 	void *objp;
2837 
2838 	while (next) {
2839 		objp = next - obj_offset(cachep);
2840 		next = *(void **)next;
2841 		poison_obj(cachep, objp, POISON_FREE);
2842 	}
2843 #endif
2844 }
2845 
2846 static inline void fixup_slab_list(struct kmem_cache *cachep,
2847 				struct kmem_cache_node *n, struct page *page,
2848 				void **list)
2849 {
2850 	/* move slabp to correct slabp list: */
2851 	list_del(&page->lru);
2852 	if (page->active == cachep->num) {
2853 		list_add(&page->lru, &n->slabs_full);
2854 		if (OBJFREELIST_SLAB(cachep)) {
2855 #if DEBUG
2856 			/* Poisoning will be done without holding the lock */
2857 			if (cachep->flags & SLAB_POISON) {
2858 				void **objp = page->freelist;
2859 
2860 				*objp = *list;
2861 				*list = objp;
2862 			}
2863 #endif
2864 			page->freelist = NULL;
2865 		}
2866 	} else
2867 		list_add(&page->lru, &n->slabs_partial);
2868 }
2869 
2870 /* Try to find non-pfmemalloc slab if needed */
2871 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2872 					struct page *page, bool pfmemalloc)
2873 {
2874 	if (!page)
2875 		return NULL;
2876 
2877 	if (pfmemalloc)
2878 		return page;
2879 
2880 	if (!PageSlabPfmemalloc(page))
2881 		return page;
2882 
2883 	/* No need to keep pfmemalloc slab if we have enough free objects */
2884 	if (n->free_objects > n->free_limit) {
2885 		ClearPageSlabPfmemalloc(page);
2886 		return page;
2887 	}
2888 
2889 	/* Move pfmemalloc slab to the end of list to speed up next search */
2890 	list_del(&page->lru);
2891 	if (!page->active) {
2892 		list_add_tail(&page->lru, &n->slabs_free);
2893 		n->free_slabs++;
2894 	} else
2895 		list_add_tail(&page->lru, &n->slabs_partial);
2896 
2897 	list_for_each_entry(page, &n->slabs_partial, lru) {
2898 		if (!PageSlabPfmemalloc(page))
2899 			return page;
2900 	}
2901 
2902 	n->free_touched = 1;
2903 	list_for_each_entry(page, &n->slabs_free, lru) {
2904 		if (!PageSlabPfmemalloc(page)) {
2905 			n->free_slabs--;
2906 			return page;
2907 		}
2908 	}
2909 
2910 	return NULL;
2911 }
2912 
2913 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2914 {
2915 	struct page *page;
2916 
2917 	assert_spin_locked(&n->list_lock);
2918 	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2919 	if (!page) {
2920 		n->free_touched = 1;
2921 		page = list_first_entry_or_null(&n->slabs_free, struct page,
2922 						lru);
2923 		if (page)
2924 			n->free_slabs--;
2925 	}
2926 
2927 	if (sk_memalloc_socks())
2928 		page = get_valid_first_slab(n, page, pfmemalloc);
2929 
2930 	return page;
2931 }
2932 
2933 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2934 				struct kmem_cache_node *n, gfp_t flags)
2935 {
2936 	struct page *page;
2937 	void *obj;
2938 	void *list = NULL;
2939 
2940 	if (!gfp_pfmemalloc_allowed(flags))
2941 		return NULL;
2942 
2943 	spin_lock(&n->list_lock);
2944 	page = get_first_slab(n, true);
2945 	if (!page) {
2946 		spin_unlock(&n->list_lock);
2947 		return NULL;
2948 	}
2949 
2950 	obj = slab_get_obj(cachep, page);
2951 	n->free_objects--;
2952 
2953 	fixup_slab_list(cachep, n, page, &list);
2954 
2955 	spin_unlock(&n->list_lock);
2956 	fixup_objfreelist_debug(cachep, &list);
2957 
2958 	return obj;
2959 }
2960 
2961 /*
2962  * The slab list should be fixed up by fixup_slab_list() for an existing
2963  * slab, or by cache_grow_end() for a new slab
2964  */
2965 static __always_inline int alloc_block(struct kmem_cache *cachep,
2966 		struct array_cache *ac, struct page *page, int batchcount)
2967 {
2968 	/*
2969 	 * There must be at least one object available for
2970 	 * allocation.
2971 	 */
2972 	BUG_ON(page->active >= cachep->num);
2973 
2974 	while (page->active < cachep->num && batchcount--) {
2975 		STATS_INC_ALLOCED(cachep);
2976 		STATS_INC_ACTIVE(cachep);
2977 		STATS_SET_HIGH(cachep);
2978 
2979 		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2980 	}
2981 
2982 	return batchcount;
2983 }
2984 
2985 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2986 {
2987 	int batchcount;
2988 	struct kmem_cache_node *n;
2989 	struct array_cache *ac, *shared;
2990 	int node;
2991 	void *list = NULL;
2992 	struct page *page;
2993 
2994 	check_irq_off();
2995 	node = numa_mem_id();
2996 
2997 	ac = cpu_cache_get(cachep);
2998 	batchcount = ac->batchcount;
2999 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3000 		/*
3001 		 * If there was little recent activity on this cache, then
3002 		 * perform only a partial refill.  Otherwise we could generate
3003 		 * refill bouncing.
3004 		 */
3005 		batchcount = BATCHREFILL_LIMIT;
3006 	}
3007 	n = get_node(cachep, node);
3008 
3009 	BUG_ON(ac->avail > 0 || !n);
3010 	shared = READ_ONCE(n->shared);
3011 	if (!n->free_objects && (!shared || !shared->avail))
3012 		goto direct_grow;
3013 
3014 	spin_lock(&n->list_lock);
3015 	shared = READ_ONCE(n->shared);
3016 
3017 	/* See if we can refill from the shared array */
3018 	if (shared && transfer_objects(ac, shared, batchcount)) {
3019 		shared->touched = 1;
3020 		goto alloc_done;
3021 	}
3022 
3023 	while (batchcount > 0) {
3024 		/* Get the slab that the allocation is to come from. */
3025 		page = get_first_slab(n, false);
3026 		if (!page)
3027 			goto must_grow;
3028 
3029 		check_spinlock_acquired(cachep);
3030 
3031 		batchcount = alloc_block(cachep, ac, page, batchcount);
3032 		fixup_slab_list(cachep, n, page, &list);
3033 	}
3034 
3035 must_grow:
3036 	n->free_objects -= ac->avail;
3037 alloc_done:
3038 	spin_unlock(&n->list_lock);
3039 	fixup_objfreelist_debug(cachep, &list);
3040 
3041 direct_grow:
3042 	if (unlikely(!ac->avail)) {
3043 		/* Check if we can use obj in pfmemalloc slab */
3044 		if (sk_memalloc_socks()) {
3045 			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3046 
3047 			if (obj)
3048 				return obj;
3049 		}
3050 
3051 		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3052 
3053 		/*
3054 		 * cache_grow_begin() can reenable interrupts,
3055 		 * so the ac could have changed.
3056 		 */
3057 		ac = cpu_cache_get(cachep);
3058 		if (!ac->avail && page)
3059 			alloc_block(cachep, ac, page, batchcount);
3060 		cache_grow_end(cachep, page);
3061 
3062 		if (!ac->avail)
3063 			return NULL;
3064 	}
3065 	ac->touched = 1;
3066 
3067 	return ac->entry[--ac->avail];
3068 }
3069 
3070 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3071 						gfp_t flags)
3072 {
3073 	might_sleep_if(gfpflags_allow_blocking(flags));
3074 }
3075 
3076 #if DEBUG
3077 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3078 				gfp_t flags, void *objp, unsigned long caller)
3079 {
3080 	if (!objp)
3081 		return objp;
3082 	if (cachep->flags & SLAB_POISON) {
3083 		check_poison_obj(cachep, objp);
3084 		slab_kernel_map(cachep, objp, 1, 0);
3085 		poison_obj(cachep, objp, POISON_INUSE);
3086 	}
3087 	if (cachep->flags & SLAB_STORE_USER)
3088 		*dbg_userword(cachep, objp) = (void *)caller;
3089 
3090 	if (cachep->flags & SLAB_RED_ZONE) {
3091 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3092 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3093 			slab_error(cachep, "double free, or memory outside object was overwritten");
3094 			pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3095 			       objp, *dbg_redzone1(cachep, objp),
3096 			       *dbg_redzone2(cachep, objp));
3097 		}
3098 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3099 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3100 	}
3101 
3102 	objp += obj_offset(cachep);
3103 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3104 		cachep->ctor(objp);
3105 	if (ARCH_SLAB_MINALIGN &&
3106 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3107 		pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3108 		       objp, (int)ARCH_SLAB_MINALIGN);
3109 	}
3110 	return objp;
3111 }
3112 #else
3113 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3114 #endif
3115 
3116 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3117 {
3118 	void *objp;
3119 	struct array_cache *ac;
3120 
3121 	check_irq_off();
3122 
3123 	ac = cpu_cache_get(cachep);
3124 	if (likely(ac->avail)) {
3125 		ac->touched = 1;
3126 		objp = ac->entry[--ac->avail];
3127 
3128 		STATS_INC_ALLOCHIT(cachep);
3129 		goto out;
3130 	}
3131 
3132 	STATS_INC_ALLOCMISS(cachep);
3133 	objp = cache_alloc_refill(cachep, flags);
3134 	/*
3135 	 * the 'ac' may be updated by cache_alloc_refill(),
3136 	 * and kmemleak_erase() requires its correct value.
3137 	 */
3138 	ac = cpu_cache_get(cachep);
3139 
3140 out:
3141 	/*
3142 	 * To avoid a false negative, if an object that is in one of the
3143 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3144 	 * treat the array pointers as a reference to the object.
3145 	 */
3146 	if (objp)
3147 		kmemleak_erase(&ac->entry[ac->avail]);
3148 	return objp;
3149 }
3150 
3151 #ifdef CONFIG_NUMA
3152 /*
3153  * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3154  *
3155  * If we are in_interrupt, then process context, including cpusets and
3156  * mempolicy, may not apply and should not be used for allocation policy.
3157  */
3158 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3159 {
3160 	int nid_alloc, nid_here;
3161 
3162 	if (in_interrupt() || (flags & __GFP_THISNODE))
3163 		return NULL;
3164 	nid_alloc = nid_here = numa_mem_id();
3165 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3166 		nid_alloc = cpuset_slab_spread_node();
3167 	else if (current->mempolicy)
3168 		nid_alloc = mempolicy_slab_node();
3169 	if (nid_alloc != nid_here)
3170 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3171 	return NULL;
3172 }
3173 
3174 /*
3175  * Fallback function if there was no memory available and no objects on a
3176  * certain node and fallback is permitted. First we scan all the
3177  * available nodes for available objects. If that fails then we
3178  * perform an allocation without specifying a node. This allows the page
3179  * allocator to do its reclaim / fallback magic. We then insert the
3180  * slab into the proper nodelist and then allocate from it.
3181  */
3182 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3183 {
3184 	struct zonelist *zonelist;
3185 	struct zoneref *z;
3186 	struct zone *zone;
3187 	enum zone_type high_zoneidx = gfp_zone(flags);
3188 	void *obj = NULL;
3189 	struct page *page;
3190 	int nid;
3191 	unsigned int cpuset_mems_cookie;
3192 
3193 	if (flags & __GFP_THISNODE)
3194 		return NULL;
3195 
3196 retry_cpuset:
3197 	cpuset_mems_cookie = read_mems_allowed_begin();
3198 	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3199 
3200 retry:
3201 	/*
3202 	 * Look through allowed nodes for objects available
3203 	 * from existing per node queues.
3204 	 */
3205 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3206 		nid = zone_to_nid(zone);
3207 
3208 		if (cpuset_zone_allowed(zone, flags) &&
3209 			get_node(cache, nid) &&
3210 			get_node(cache, nid)->free_objects) {
3211 				obj = ____cache_alloc_node(cache,
3212 					gfp_exact_node(flags), nid);
3213 				if (obj)
3214 					break;
3215 		}
3216 	}
3217 
3218 	if (!obj) {
3219 		/*
3220 		 * This allocation will be performed within the constraints
3221 		 * of the current cpuset / memory policy requirements.
3222 		 * We may trigger various forms of reclaim on the allowed
3223 		 * set and go into memory reserves if necessary.
3224 		 */
3225 		page = cache_grow_begin(cache, flags, numa_mem_id());
3226 		cache_grow_end(cache, page);
3227 		if (page) {
3228 			nid = page_to_nid(page);
3229 			obj = ____cache_alloc_node(cache,
3230 				gfp_exact_node(flags), nid);
3231 
3232 			/*
3233 			 * Another processor may allocate the objects in
3234 			 * the slab since we are not holding any locks.
3235 			 */
3236 			if (!obj)
3237 				goto retry;
3238 		}
3239 	}
3240 
3241 	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3242 		goto retry_cpuset;
3243 	return obj;
3244 }
3245 
3246 /*
3247  * An interface to enable slab creation on nodeid
3248  */
3249 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3250 				int nodeid)
3251 {
3252 	struct page *page;
3253 	struct kmem_cache_node *n;
3254 	void *obj = NULL;
3255 	void *list = NULL;
3256 
3257 	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3258 	n = get_node(cachep, nodeid);
3259 	BUG_ON(!n);
3260 
3261 	check_irq_off();
3262 	spin_lock(&n->list_lock);
3263 	page = get_first_slab(n, false);
3264 	if (!page)
3265 		goto must_grow;
3266 
3267 	check_spinlock_acquired_node(cachep, nodeid);
3268 
3269 	STATS_INC_NODEALLOCS(cachep);
3270 	STATS_INC_ACTIVE(cachep);
3271 	STATS_SET_HIGH(cachep);
3272 
3273 	BUG_ON(page->active == cachep->num);
3274 
3275 	obj = slab_get_obj(cachep, page);
3276 	n->free_objects--;
3277 
3278 	fixup_slab_list(cachep, n, page, &list);
3279 
3280 	spin_unlock(&n->list_lock);
3281 	fixup_objfreelist_debug(cachep, &list);
3282 	return obj;
3283 
3284 must_grow:
3285 	spin_unlock(&n->list_lock);
3286 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3287 	if (page) {
3288 		/* This slab isn't counted yet so don't update free_objects */
3289 		obj = slab_get_obj(cachep, page);
3290 	}
3291 	cache_grow_end(cachep, page);
3292 
3293 	return obj ? obj : fallback_alloc(cachep, flags);
3294 }
3295 
3296 static __always_inline void *
3297 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3298 		   unsigned long caller)
3299 {
3300 	unsigned long save_flags;
3301 	void *ptr;
3302 	int slab_node = numa_mem_id();
3303 
3304 	flags &= gfp_allowed_mask;
3305 	cachep = slab_pre_alloc_hook(cachep, flags);
3306 	if (unlikely(!cachep))
3307 		return NULL;
3308 
3309 	cache_alloc_debugcheck_before(cachep, flags);
3310 	local_irq_save(save_flags);
3311 
3312 	if (nodeid == NUMA_NO_NODE)
3313 		nodeid = slab_node;
3314 
3315 	if (unlikely(!get_node(cachep, nodeid))) {
3316 		/* Node not bootstrapped yet */
3317 		ptr = fallback_alloc(cachep, flags);
3318 		goto out;
3319 	}
3320 
3321 	if (nodeid == slab_node) {
3322 		/*
3323 		 * Use the locally cached objects if possible.
3324 		 * However ____cache_alloc does not allow fallback
3325 		 * to other nodes. It may fail while we still have
3326 		 * objects on other nodes available.
3327 		 */
3328 		ptr = ____cache_alloc(cachep, flags);
3329 		if (ptr)
3330 			goto out;
3331 	}
3332 	/* ____cache_alloc_node can fall back to other nodes */
3333 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3334   out:
3335 	local_irq_restore(save_flags);
3336 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3337 
3338 	if (unlikely(flags & __GFP_ZERO) && ptr)
3339 		memset(ptr, 0, cachep->object_size);
3340 
3341 	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3342 	return ptr;
3343 }
3344 
3345 static __always_inline void *
3346 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3347 {
3348 	void *objp;
3349 
3350 	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3351 		objp = alternate_node_alloc(cache, flags);
3352 		if (objp)
3353 			goto out;
3354 	}
3355 	objp = ____cache_alloc(cache, flags);
3356 
3357 	/*
3358 	 * We may just have run out of memory on the local node.
3359 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3360 	 */
3361 	if (!objp)
3362 		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3363 
3364   out:
3365 	return objp;
3366 }
3367 #else
3368 
3369 static __always_inline void *
3370 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3371 {
3372 	return ____cache_alloc(cachep, flags);
3373 }
3374 
3375 #endif /* CONFIG_NUMA */
3376 
3377 static __always_inline void *
3378 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3379 {
3380 	unsigned long save_flags;
3381 	void *objp;
3382 
3383 	flags &= gfp_allowed_mask;
3384 	cachep = slab_pre_alloc_hook(cachep, flags);
3385 	if (unlikely(!cachep))
3386 		return NULL;
3387 
3388 	cache_alloc_debugcheck_before(cachep, flags);
3389 	local_irq_save(save_flags);
3390 	objp = __do_cache_alloc(cachep, flags);
3391 	local_irq_restore(save_flags);
3392 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3393 	prefetchw(objp);
3394 
3395 	if (unlikely(flags & __GFP_ZERO) && objp)
3396 		memset(objp, 0, cachep->object_size);
3397 
3398 	slab_post_alloc_hook(cachep, flags, 1, &objp);
3399 	return objp;
3400 }
3401 
3402 /*
3403  * Caller needs to acquire the correct kmem_cache_node's list_lock.
3404  * @list: list of detached free slabs, which should be freed by the caller
3405  */
3406 static void free_block(struct kmem_cache *cachep, void **objpp,
3407 			int nr_objects, int node, struct list_head *list)
3408 {
3409 	int i;
3410 	struct kmem_cache_node *n = get_node(cachep, node);
3411 	struct page *page;
3412 
3413 	n->free_objects += nr_objects;
3414 
3415 	for (i = 0; i < nr_objects; i++) {
3416 		void *objp;
3417 		struct page *page;
3418 
3419 		objp = objpp[i];
3420 
3421 		page = virt_to_head_page(objp);
3422 		list_del(&page->lru);
3423 		check_spinlock_acquired_node(cachep, node);
3424 		slab_put_obj(cachep, page, objp);
3425 		STATS_DEC_ACTIVE(cachep);
3426 
3427 		/* fixup slab chains */
3428 		if (page->active == 0) {
3429 			list_add(&page->lru, &n->slabs_free);
3430 			n->free_slabs++;
3431 		} else {
3432 			/* Unconditionally move a slab to the end of the
3433 			 * partial list on free - this gives maximum time for the
3434 			 * other objects to be freed, too.
3435 			 */
3436 			list_add_tail(&page->lru, &n->slabs_partial);
3437 		}
3438 	}
3439 
3440 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3441 		n->free_objects -= cachep->num;
3442 
3443 		page = list_last_entry(&n->slabs_free, struct page, lru);
3444 		list_move(&page->lru, list);
3445 		n->free_slabs--;
3446 		n->total_slabs--;
3447 	}
3448 }
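/*
 * Trimming example (illustrative): with cachep->num = 15 and
 * n->free_limit = 60, a free that pushes n->free_objects to 75 detaches
 * one completely free slab onto @list, dropping free_objects back to 60;
 * the caller then destroys the detached slabs via slabs_destroy() after
 * releasing the list_lock.
 */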
3449 
3450 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3451 {
3452 	int batchcount;
3453 	struct kmem_cache_node *n;
3454 	int node = numa_mem_id();
3455 	LIST_HEAD(list);
3456 
3457 	batchcount = ac->batchcount;
3458 
3459 	check_irq_off();
3460 	n = get_node(cachep, node);
3461 	spin_lock(&n->list_lock);
3462 	if (n->shared) {
3463 		struct array_cache *shared_array = n->shared;
3464 		int max = shared_array->limit - shared_array->avail;
3465 		if (max) {
3466 			if (batchcount > max)
3467 				batchcount = max;
3468 			memcpy(&(shared_array->entry[shared_array->avail]),
3469 			       ac->entry, sizeof(void *) * batchcount);
3470 			shared_array->avail += batchcount;
3471 			goto free_done;
3472 		}
3473 	}
3474 
3475 	free_block(cachep, ac->entry, batchcount, node, &list);
3476 free_done:
3477 #if STATS
3478 	{
3479 		int i = 0;
3480 		struct page *page;
3481 
3482 		list_for_each_entry(page, &n->slabs_free, lru) {
3483 			BUG_ON(page->active);
3484 
3485 			i++;
3486 		}
3487 		STATS_SET_FREEABLE(cachep, i);
3488 	}
3489 #endif
3490 	spin_unlock(&n->list_lock);
3491 	slabs_destroy(cachep, &list);
3492 	ac->avail -= batchcount;
3493 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3494 }
3495 
3496 /*
3497  * Release an obj back to its cache. If the obj has a constructed state, it must
3498  * be in this state _before_ it is released.  Called with interrupts disabled.
3499  */
3500 static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3501 				unsigned long caller)
3502 {
3503 	/* Put the object into the quarantine, don't touch it for now. */
3504 	if (kasan_slab_free(cachep, objp))
3505 		return;
3506 
3507 	___cache_free(cachep, objp, caller);
3508 }
3509 
3510 void ___cache_free(struct kmem_cache *cachep, void *objp,
3511 		unsigned long caller)
3512 {
3513 	struct array_cache *ac = cpu_cache_get(cachep);
3514 
3515 	check_irq_off();
3516 	kmemleak_free_recursive(objp, cachep->flags);
3517 	objp = cache_free_debugcheck(cachep, objp, caller);
3518 
3519 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3520 
3521 	/*
3522 	 * Skip calling cache_free_alien() when the platform is not NUMA.
3523 	 * This avoids cache misses that happen while accessing slabp (which
3524 	 * is a per-page memory reference) to get the nodeid. Instead use a
3525 	 * global variable to skip the call, which is most likely to be present
3526 	 * in the cache.
3527 	 */
3528 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3529 		return;
3530 
3531 	if (ac->avail < ac->limit) {
3532 		STATS_INC_FREEHIT(cachep);
3533 	} else {
3534 		STATS_INC_FREEMISS(cachep);
3535 		cache_flusharray(cachep, ac);
3536 	}
3537 
3538 	if (sk_memalloc_socks()) {
3539 		struct page *page = virt_to_head_page(objp);
3540 
3541 		if (unlikely(PageSlabPfmemalloc(page))) {
3542 			cache_free_pfmemalloc(cachep, page, objp);
3543 			return;
3544 		}
3545 	}
3546 
3547 	ac->entry[ac->avail++] = objp;
3548 }
3549 
3550 /**
3551  * kmem_cache_alloc - Allocate an object
3552  * @cachep: The cache to allocate from.
3553  * @flags: See kmalloc().
3554  *
3555  * Allocate an object from this cache.  The flags are only relevant
3556  * if the cache has no available objects.
3557  */
3558 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3559 {
3560 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3561 
3562 	kasan_slab_alloc(cachep, ret, flags);
3563 	trace_kmem_cache_alloc(_RET_IP_, ret,
3564 			       cachep->object_size, cachep->size, flags);
3565 
3566 	return ret;
3567 }
3568 EXPORT_SYMBOL(kmem_cache_alloc);
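/*
 * Illustrative usage (using the hypothetical 'foo_cache' from the
 * kmem_cache_create() sketch earlier):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */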
3569 
3570 static __always_inline void
3571 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3572 				  size_t size, void **p, unsigned long caller)
3573 {
3574 	size_t i;
3575 
3576 	for (i = 0; i < size; i++)
3577 		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3578 }
3579 
3580 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3581 			  void **p)
3582 {
3583 	size_t i;
3584 
3585 	s = slab_pre_alloc_hook(s, flags);
3586 	if (!s)
3587 		return 0;
3588 
3589 	cache_alloc_debugcheck_before(s, flags);
3590 
3591 	local_irq_disable();
3592 	for (i = 0; i < size; i++) {
3593 		void *objp = __do_cache_alloc(s, flags);
3594 
3595 		if (unlikely(!objp))
3596 			goto error;
3597 		p[i] = objp;
3598 	}
3599 	local_irq_enable();
3600 
3601 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3602 
3603 	/* Clear memory outside IRQ disabled section */
3604 	if (unlikely(flags & __GFP_ZERO))
3605 		for (i = 0; i < size; i++)
3606 			memset(p[i], 0, s->object_size);
3607 
3608 	slab_post_alloc_hook(s, flags, size, p);
3609 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3610 	return size;
3611 error:
3612 	local_irq_enable();
3613 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3614 	slab_post_alloc_hook(s, flags, i, p);
3615 	__kmem_cache_free_bulk(s, i, p);
3616 	return 0;
3617 }
3618 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
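/*
 * Bulk usage sketch (illustrative): kmem_cache_alloc_bulk() returns the
 * requested size on success and 0 on failure, so callers can do:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		...use all 16 objects...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 */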
3619 
3620 #ifdef CONFIG_TRACING
3621 void *
3622 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3623 {
3624 	void *ret;
3625 
3626 	ret = slab_alloc(cachep, flags, _RET_IP_);
3627 
3628 	kasan_kmalloc(cachep, ret, size, flags);
3629 	trace_kmalloc(_RET_IP_, ret,
3630 		      size, cachep->size, flags);
3631 	return ret;
3632 }
3633 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3634 #endif
3635 
3636 #ifdef CONFIG_NUMA
3637 /**
3638  * kmem_cache_alloc_node - Allocate an object on the specified node
3639  * @cachep: The cache to allocate from.
3640  * @flags: See kmalloc().
3641  * @nodeid: node number of the target node.
3642  *
3643  * Identical to kmem_cache_alloc but it will allocate memory on the given
3644  * node, which can improve the performance for cpu bound structures.
3645  *
3646  * Fallback to other node is possible if __GFP_THISNODE is not set.
3647  */
3648 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3649 {
3650 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3651 
3652 	kasan_slab_alloc(cachep, ret, flags);
3653 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3654 				    cachep->object_size, cachep->size,
3655 				    flags, nodeid);
3656 
3657 	return ret;
3658 }
3659 EXPORT_SYMBOL(kmem_cache_alloc_node);
3660 
3661 #ifdef CONFIG_TRACING
3662 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3663 				  gfp_t flags,
3664 				  int nodeid,
3665 				  size_t size)
3666 {
3667 	void *ret;
3668 
3669 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3670 
3671 	kasan_kmalloc(cachep, ret, size, flags);
3672 	trace_kmalloc_node(_RET_IP_, ret,
3673 			   size, cachep->size,
3674 			   flags, nodeid);
3675 	return ret;
3676 }
3677 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3678 #endif
3679 
3680 static __always_inline void *
3681 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3682 {
3683 	struct kmem_cache *cachep;
3684 	void *ret;
3685 
3686 	cachep = kmalloc_slab(size, flags);
3687 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3688 		return cachep;
3689 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3690 	kasan_kmalloc(cachep, ret, size, flags);
3691 
3692 	return ret;
3693 }
3694 
3695 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3696 {
3697 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3698 }
3699 EXPORT_SYMBOL(__kmalloc_node);
3700 
3701 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3702 		int node, unsigned long caller)
3703 {
3704 	return __do_kmalloc_node(size, flags, node, caller);
3705 }
3706 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3707 #endif /* CONFIG_NUMA */
3708 
3709 /**
3710  * __do_kmalloc - allocate memory
3711  * @size: how many bytes of memory are required.
3712  * @flags: the type of memory to allocate (see kmalloc).
3713  * @caller: function caller for debug tracking of the caller
3714  */
3715 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3716 					  unsigned long caller)
3717 {
3718 	struct kmem_cache *cachep;
3719 	void *ret;
3720 
3721 	cachep = kmalloc_slab(size, flags);
3722 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3723 		return cachep;
3724 	ret = slab_alloc(cachep, flags, caller);
3725 
3726 	kasan_kmalloc(cachep, ret, size, flags);
3727 	trace_kmalloc(caller, ret,
3728 		      size, cachep->size, flags);
3729 
3730 	return ret;
3731 }
3732 
3733 void *__kmalloc(size_t size, gfp_t flags)
3734 {
3735 	return __do_kmalloc(size, flags, _RET_IP_);
3736 }
3737 EXPORT_SYMBOL(__kmalloc);
3738 
3739 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3740 {
3741 	return __do_kmalloc(size, flags, caller);
3742 }
3743 EXPORT_SYMBOL(__kmalloc_track_caller);
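/*
 * Illustrative pairing (roughly, kmalloc() in <linux/slab.h> resolves to
 * __kmalloc() above for non-constant sizes):
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */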
3744 
3745 /**
3746  * kmem_cache_free - Deallocate an object
3747  * @cachep: The cache the allocation was from.
3748  * @objp: The previously allocated object.
3749  *
3750  * Free an object which was previously allocated from this
3751  * cache.
3752  */
3753 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3754 {
3755 	unsigned long flags;
3756 	cachep = cache_from_obj(cachep, objp);
3757 	if (!cachep)
3758 		return;
3759 
3760 	local_irq_save(flags);
3761 	debug_check_no_locks_freed(objp, cachep->object_size);
3762 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3763 		debug_check_no_obj_freed(objp, cachep->object_size);
3764 	__cache_free(cachep, objp, _RET_IP_);
3765 	local_irq_restore(flags);
3766 
3767 	trace_kmem_cache_free(_RET_IP_, objp);
3768 }
3769 EXPORT_SYMBOL(kmem_cache_free);
3770 
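/**
 * kmem_cache_free_bulk - free multiple objects in one call
 * @orig_s: the cache the objects were allocated from, or NULL when called
 *	via kfree_bulk(), in which case each object's cache is derived from
 *	its backing page
 * @size: number of pointers in @p
 * @p: array of objects to free
 */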
void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
{
	struct kmem_cache *s;
	size_t i;

	local_irq_disable();
	for (i = 0; i < size; i++) {
		void *objp = p[i];

		if (!orig_s) /* called via kfree_bulk */
			s = virt_to_cache(objp);
		else
			s = cache_from_obj(orig_s, objp);

		debug_check_no_locks_freed(objp, s->object_size);
		if (!(s->flags & SLAB_DEBUG_OBJECTS))
			debug_check_no_obj_freed(objp, s->object_size);

		__cache_free(s, objp, _RET_IP_);
	}
	local_irq_enable();

	/* FIXME: add tracing */
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	debug_check_no_locks_freed(objp, c->object_size);
	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);

/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
{
	int ret;
	int node;
	struct kmem_cache_node *n;

	for_each_online_node(node) {
		ret = setup_kmem_cache_node(cachep, node, gfp, true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			n = get_node(cachep, node);
			if (n) {
				kfree(n->shared);
				free_alien_cache(n->alien);
				kfree(n);
				cachep->node[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}

/* Always called with the slab_mutex held */
static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	struct array_cache __percpu *cpu_cache, *prev;
	int cpu;

	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
	if (!cpu_cache)
		return -ENOMEM;

	prev = cachep->cpu_cache;
	cachep->cpu_cache = cpu_cache;
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
	if (prev)
		kick_all_cpus_sync();

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	if (!prev)
		goto setup_node;

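	/*
	 * Drain the objects cached in the old per-cpu arrays back to the
	 * per-node lists; the kick_all_cpus_sync() above ensures that no
	 * cpu is still using prev.
	 */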
	for_each_online_cpu(cpu) {
		LIST_HEAD(list);
		int node;
		struct kmem_cache_node *n;
		struct array_cache *ac = per_cpu_ptr(prev, cpu);

		node = cpu_to_mem(cpu);
		n = get_node(cachep, node);
		spin_lock_irq(&n->list_lock);
		free_block(cachep, ac->entry, ac->avail, node, &list);
		spin_unlock_irq(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	free_percpu(prev);

setup_node:
	return setup_kmem_cache_nodes(cachep, gfp);
}

static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
				int batchcount, int shared, gfp_t gfp)
{
	int ret;
	struct kmem_cache *c;

	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);

	if (slab_state < FULL)
		return ret;

	if ((ret < 0) || !is_root_cache(cachep))
		return ret;

	lockdep_assert_held(&slab_mutex);
	for_each_memcg_cache(c, cachep) {
		/* return value determined by the root cache only */
		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
	}

	return ret;
}

/* Always called with the slab_mutex held */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit = 0;
	int shared = 0;
	int batchcount = 0;

	err = cache_random_seq_create(cachep, cachep->num, gfp);
	if (err)
		goto end;

	if (!is_root_cache(cachep)) {
		struct kmem_cache *root = memcg_root_cache(cachep);

		limit = root->limit;
		shared = root->shared;
		batchcount = root->batchcount;
	}

	if (limit && shared && batchcount)
		goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper
	 * The numbers below are guessed; we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->size > 131072)
		limit = 1;
	else if (cachep->size > PAGE_SIZE)
		limit = 8;
	else if (cachep->size > 1024)
		limit = 24;
	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU-bound tasks (e.g. network routing) can exhibit lopsided
	 * allocation behaviour: most allocs on one cpu, most frees on
	 * another. For these cases an efficient way of passing objects
	 * between cpus is necessary; this is provided by a shared array.
	 * The shared array replaces Bonwick's magazine layer.
	 * On a uniprocessor it is functionally equivalent (but less
	 * efficient) to a larger limit, so it is disabled by default.
	 */
	shared = 0;
	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
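	/* Refill and flush in batches of about half the limit. */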
	batchcount = (limit + 1) / 2;
skip_setup:
	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
end:
	if (err)
		pr_err("enable_cpucache failed for %s, error %d\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node list_lock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
			 struct array_cache *ac, int node)
{
	LIST_HEAD(list);

	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
	check_mutex_acquired();

	if (!ac || !ac->avail)
		return;

	if (ac->touched) {
		ac->touched = 0;
		return;
	}

	spin_lock_irq(&n->list_lock);
	drain_array_locked(cachep, ac, node, false, &list);
	spin_unlock_irq(&n->list_lock);

	slabs_destroy(cachep, &list);
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);

	if (!mutex_trylock(&slab_mutex))
		/* Give up. Set up the next iteration. */
		goto out;

	list_for_each_entry(searchp, &slab_caches, list) {
		check_irq_on();

		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		n = get_node(searchp, node);

		reap_alien(searchp, n);

		drain_array(searchp, n, cpu_cache_get(searchp), node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(n->next_reap, jiffies))
			goto next;

		n->next_reap = jiffies + REAPTIMEOUT_NODE;

		drain_array(searchp, n, n->shared, node);

		if (n->free_touched)
			n->free_touched = 0;
		else {
			int freed;

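			/*
			 * Shrink by roughly a fifth of the node's free
			 * limit, rounded up to whole slabs (searchp->num
			 * objects per slab).
			 */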
			freed = drain_freelist(searchp, n, (n->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&slab_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}

#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
	unsigned long active_objs, num_objs, active_slabs;
	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
	unsigned long free_slabs = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(cachep, node, n) {
		check_irq_on();
		spin_lock_irq(&n->list_lock);

		total_slabs += n->total_slabs;
		free_slabs += n->free_slabs;
		free_objs += n->free_objects;

		if (n->shared)
			shared_avail += n->shared->avail;

		spin_unlock_irq(&n->list_lock);
	}
	num_objs = total_slabs * cachep->num;
	active_slabs = total_slabs - free_slabs;
	active_objs = num_objs - free_objs;

	sinfo->active_objs = active_objs;
	sinfo->num_objs = num_objs;
	sinfo->active_slabs = active_slabs;
	sinfo->num_slabs = total_slabs;
	sinfo->shared_avail = shared_avail;
	sinfo->limit = cachep->limit;
	sinfo->batchcount = cachep->batchcount;
	sinfo->shared = cachep->shared;
	sinfo->objects_per_slab = cachep->num;
	sinfo->cache_order = cachep->gfporder;
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
{
#if STATS
	{			/* node stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown,
			   reaped, errors, max_freeable, node_allocs,
			   node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
}

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
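 *
 * Expects a line of the form "cache-name limit batchcount shared"; e.g.
 * (illustrative values only):
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo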
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&slab_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &slab_caches, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared,
						       GFP_KERNEL);
			}
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	if (res >= 0)
		res = count;
	return res;
}

#ifdef CONFIG_DEBUG_SLAB_LEAK

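/*
 * Leak callers are collected in a flat array of (address, count) pairs kept
 * sorted by address: n[0] is the capacity, n[1] the number of pairs in use,
 * and the pairs themselves start at n[2]. add_caller() binary-searches the
 * pairs and either bumps the count of an existing entry or inserts a new
 * one; it returns 0 once the buffer fills up so the caller can resize.
 */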
static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;

	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;

		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) -
			((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c,
						struct page *page)
{
	void *p;
	int i, j;
	unsigned long v;

	if (n[0] == n[1])
		return;
	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
		bool active = true;

		for (j = page->active; j < c->num; j++) {
			if (get_free_obj(page, j) == i) {
				active = false;
				break;
			}
		}

		if (!active)
			continue;

		/*
		 * probe_kernel_read() is used because, with DEBUG_PAGEALLOC,
		 * the page table mapping is only established when the object
		 * is actually allocated, so a plain read could fault on an
		 * unmapped object still sitting in the cpu cache.
		 */
		if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
			continue;

		if (!add_caller(n, v))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
	struct page *page;
	struct kmem_cache_node *n;
	const char *name;
	unsigned long *x = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/*
	 * Set store_user_clean and start to grab stored user information
	 * for all objects on this cache. If any alloc/free request comes in
	 * while we are processing, the information would be wrong, so
	 * restart the whole scan.
	 */
	do {
		set_store_user_clean(cachep);
		drain_cpu_caches(cachep);

		x[1] = 0;

		for_each_kmem_cache_node(cachep, node, n) {

			check_irq_on();
			spin_lock_irq(&n->list_lock);

			list_for_each_entry(page, &n->slabs_full, lru)
				handle_slab(x, cachep, page);
			list_for_each_entry(page, &n->slabs_partial, lru)
				handle_slab(x, cachep, page);
			spin_unlock_irq(&n->list_lock);
		}
	} while (!is_store_user_clean(cachep));

	name = cachep->name;
	if (x[0] == x[1]) {
		/* Increase the buffer size */
		mutex_unlock(&slab_mutex);
		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = x;
			mutex_lock(&slab_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = x[0] * 2;
		kfree(x);
		mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < x[1]; i++) {
		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
		show_symbol(m, x[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

static const struct seq_operations slabstats_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
	unsigned long *n;

	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
	if (!n)
		return -ENOMEM;

	*n = PAGE_SIZE / (2 * sizeof(unsigned long));

	return 0;
}

static const struct file_operations proc_slabstats_operations = {
	.open		= slabstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
	return 0;
}
module_init(slab_proc_init);
#endif

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects objects that are incorrectly sized.
 *
 * Returns NULL if check passes, otherwise const char * to name of cache
 * to indicate an error.
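 *
 * Called from the hardened usercopy checks when a copy to or from user
 * space overlaps a slab object, to verify that the copied range stays
 * within the bounds of a single object.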
 */
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page)
{
	struct kmem_cache *cachep;
	unsigned int objnr;
	unsigned long offset;

	/* Find and validate object. */
	cachep = page->slab_cache;
	objnr = obj_to_index(cachep, page, (void *)ptr);
	BUG_ON(objnr >= cachep->num);

	/* Find offset within object. */
	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

	/* Allow address range falling entirely within object size. */
	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
		return NULL;

	return cachep->name;
}
#endif /* CONFIG_HARDENED_USERCOPY */

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed for the duration of the call.
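 *
 * For example (sizes are illustrative), ksize(kmalloc(30, GFP_KERNEL)) may
 * return 32 if the allocation is backed by the kmalloc-32 cache.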
 */
size_t ksize(const void *objp)
{
	size_t size;

	BUG_ON(!objp);
	if (unlikely(objp == ZERO_SIZE_PTR))
		return 0;

	size = virt_to_cache(objp)->object_size;
	/*
	 * We assume that ksize callers could use the whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_shadow(objp, size);

	return size;
}
EXPORT_SYMBOL(ksize);
4469