xref: /openbmc/linux/mm/slab_common.c (revision 305c8388)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Slab allocator functions that are independent of the allocator strategy
4  *
5  * (C) 2012 Christoph Lameter <cl@linux.com>
6  */
7 #include <linux/slab.h>
8 
9 #include <linux/mm.h>
10 #include <linux/poison.h>
11 #include <linux/interrupt.h>
12 #include <linux/memory.h>
13 #include <linux/cache.h>
14 #include <linux/compiler.h>
15 #include <linux/module.h>
16 #include <linux/cpu.h>
17 #include <linux/uaccess.h>
18 #include <linux/seq_file.h>
19 #include <linux/proc_fs.h>
20 #include <linux/debugfs.h>
21 #include <asm/cacheflush.h>
22 #include <asm/tlbflush.h>
23 #include <asm/page.h>
24 #include <linux/memcontrol.h>
25 
26 #define CREATE_TRACE_POINTS
27 #include <trace/events/kmem.h>
28 
29 #include "slab.h"
30 
31 enum slab_state slab_state;
32 LIST_HEAD(slab_caches);
33 DEFINE_MUTEX(slab_mutex);
34 struct kmem_cache *kmem_cache;
35 
36 #ifdef CONFIG_HARDENED_USERCOPY
37 bool usercopy_fallback __ro_after_init =
38 		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
39 module_param(usercopy_fallback, bool, 0400);
40 MODULE_PARM_DESC(usercopy_fallback,
41 		"WARN instead of reject usercopy whitelist violations");
42 #endif
43 
44 static LIST_HEAD(slab_caches_to_rcu_destroy);
45 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
46 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
47 		    slab_caches_to_rcu_destroy_workfn);
48 
49 /*
50  * Set of flags that will prevent slab merging
51  */
52 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
53 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
54 		SLAB_FAILSLAB | SLAB_KASAN)
55 
56 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
57 			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
58 
59 /*
60  * Merge control. If this is set then no merging of slab caches will occur.
61  */
62 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
63 
64 static int __init setup_slab_nomerge(char *str)
65 {
66 	slab_nomerge = true;
67 	return 1;
68 }
69 
70 #ifdef CONFIG_SLUB
71 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
72 #endif
73 
74 __setup("slab_nomerge", setup_slab_nomerge);
75 
76 /*
77  * Determine the size of a slab object
78  */
79 unsigned int kmem_cache_size(struct kmem_cache *s)
80 {
81 	return s->object_size;
82 }
83 EXPORT_SYMBOL(kmem_cache_size);
84 
85 #ifdef CONFIG_DEBUG_VM
86 static int kmem_cache_sanity_check(const char *name, unsigned int size)
87 {
88 	if (!name || in_interrupt() || size < sizeof(void *) ||
89 		size > KMALLOC_MAX_SIZE) {
90 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
91 		return -EINVAL;
92 	}
93 
94 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
95 	return 0;
96 }
97 #else
98 static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
99 {
100 	return 0;
101 }
102 #endif
103 
104 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
105 {
106 	size_t i;
107 
108 	for (i = 0; i < nr; i++) {
109 		if (s)
110 			kmem_cache_free(s, p[i]);
111 		else
112 			kfree(p[i]);
113 	}
114 }
115 
116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
117 								void **p)
118 {
119 	size_t i;
120 
121 	for (i = 0; i < nr; i++) {
122 		void *x = p[i] = kmem_cache_alloc(s, flags);
123 		if (!x) {
124 			__kmem_cache_free_bulk(s, i, p);
125 			return 0;
126 		}
127 	}
128 	return i;
129 }
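
/*
 * Illustrative usage sketch (not part of the original file; "my_cachep" is a
 * hypothetical cache). The two functions above are the generic fallbacks used
 * by allocators that do not implement a specialized bulk fast path; callers
 * use the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk() API:
 *
 *	void *objs[16];
 *	int got = kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL,
 *					ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;		/* nothing was allocated */
 *	...
 *	kmem_cache_free_bulk(my_cachep, got, objs);
 */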
130 
131 #ifdef CONFIG_MEMCG_KMEM
132 
133 LIST_HEAD(slab_root_caches);
134 static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
135 
136 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
137 
138 void slab_init_memcg_params(struct kmem_cache *s)
139 {
140 	s->memcg_params.root_cache = NULL;
141 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
142 	INIT_LIST_HEAD(&s->memcg_params.children);
143 	s->memcg_params.dying = false;
144 }
145 
146 static int init_memcg_params(struct kmem_cache *s,
147 			     struct kmem_cache *root_cache)
148 {
149 	struct memcg_cache_array *arr;
150 
151 	if (root_cache) {
152 		int ret = percpu_ref_init(&s->memcg_params.refcnt,
153 					  kmemcg_cache_shutdown,
154 					  0, GFP_KERNEL);
155 		if (ret)
156 			return ret;
157 
158 		s->memcg_params.root_cache = root_cache;
159 		INIT_LIST_HEAD(&s->memcg_params.children_node);
160 		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
161 		return 0;
162 	}
163 
164 	slab_init_memcg_params(s);
165 
166 	if (!memcg_nr_cache_ids)
167 		return 0;
168 
169 	arr = kvzalloc(sizeof(struct memcg_cache_array) +
170 		       memcg_nr_cache_ids * sizeof(void *),
171 		       GFP_KERNEL);
172 	if (!arr)
173 		return -ENOMEM;
174 
175 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
176 	return 0;
177 }
178 
179 static void destroy_memcg_params(struct kmem_cache *s)
180 {
181 	if (is_root_cache(s)) {
182 		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
183 	} else {
184 		mem_cgroup_put(s->memcg_params.memcg);
185 		WRITE_ONCE(s->memcg_params.memcg, NULL);
186 		percpu_ref_exit(&s->memcg_params.refcnt);
187 	}
188 }
189 
190 static void free_memcg_params(struct rcu_head *rcu)
191 {
192 	struct memcg_cache_array *old;
193 
194 	old = container_of(rcu, struct memcg_cache_array, rcu);
195 	kvfree(old);
196 }
197 
198 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
199 {
200 	struct memcg_cache_array *old, *new;
201 
202 	new = kvzalloc(sizeof(struct memcg_cache_array) +
203 		       new_array_size * sizeof(void *), GFP_KERNEL);
204 	if (!new)
205 		return -ENOMEM;
206 
207 	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
208 					lockdep_is_held(&slab_mutex));
209 	if (old)
210 		memcpy(new->entries, old->entries,
211 		       memcg_nr_cache_ids * sizeof(void *));
212 
213 	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
214 	if (old)
215 		call_rcu(&old->rcu, free_memcg_params);
216 	return 0;
217 }
218 
219 int memcg_update_all_caches(int num_memcgs)
220 {
221 	struct kmem_cache *s;
222 	int ret = 0;
223 
224 	mutex_lock(&slab_mutex);
225 	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
226 		ret = update_memcg_params(s, num_memcgs);
227 		/*
228 		 * Instead of freeing the memory, we'll just leave the caches
229 		 * up to this point in an updated state.
230 		 */
231 		if (ret)
232 			break;
233 	}
234 	mutex_unlock(&slab_mutex);
235 	return ret;
236 }
237 
238 void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
239 {
240 	if (is_root_cache(s)) {
241 		list_add(&s->root_caches_node, &slab_root_caches);
242 	} else {
243 		css_get(&memcg->css);
244 		s->memcg_params.memcg = memcg;
245 		list_add(&s->memcg_params.children_node,
246 			 &s->memcg_params.root_cache->memcg_params.children);
247 		list_add(&s->memcg_params.kmem_caches_node,
248 			 &s->memcg_params.memcg->kmem_caches);
249 	}
250 }
251 
252 static void memcg_unlink_cache(struct kmem_cache *s)
253 {
254 	if (is_root_cache(s)) {
255 		list_del(&s->root_caches_node);
256 	} else {
257 		list_del(&s->memcg_params.children_node);
258 		list_del(&s->memcg_params.kmem_caches_node);
259 	}
260 }
261 #else
262 static inline int init_memcg_params(struct kmem_cache *s,
263 				    struct kmem_cache *root_cache)
264 {
265 	return 0;
266 }
267 
268 static inline void destroy_memcg_params(struct kmem_cache *s)
269 {
270 }
271 
272 static inline void memcg_unlink_cache(struct kmem_cache *s)
273 {
274 }
275 #endif /* CONFIG_MEMCG_KMEM */
276 
277 /*
278  * Figure out what the alignment of the objects will be given a set of
279  * flags, a user specified alignment and the size of the objects.
280  */
281 static unsigned int calculate_alignment(slab_flags_t flags,
282 		unsigned int align, unsigned int size)
283 {
284 	/*
285 	 * If the user wants hardware cache aligned objects then follow that
286 	 * suggestion if the object is sufficiently large.
287 	 *
288 	 * The hardware cache alignment cannot override the specified
289 	 * alignment though. If that is greater, then use it.
290 	 */
291 	if (flags & SLAB_HWCACHE_ALIGN) {
292 		unsigned int ralign;
293 
294 		ralign = cache_line_size();
295 		while (size <= ralign / 2)
296 			ralign /= 2;
297 		align = max(align, ralign);
298 	}
299 
300 	if (align < ARCH_SLAB_MINALIGN)
301 		align = ARCH_SLAB_MINALIGN;
302 
303 	return ALIGN(align, sizeof(void *));
304 }
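
/*
 * Illustrative example (not part of the original file), assuming a 64 byte
 * cache line and a small ARCH_SLAB_MINALIGN: for a 20 byte object,
 * calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 20) starts with ralign = 64 and
 * halves it while the object still fits in half of it (64 -> 32; 20 <= 32
 * but 20 > 16), so the result is 32. A 100 byte object with the same flags
 * keeps the full 64 byte cache line alignment.
 */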
305 
306 /*
307  * Find a mergeable slab cache
308  */
309 int slab_unmergeable(struct kmem_cache *s)
310 {
311 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
312 		return 1;
313 
314 	if (!is_root_cache(s))
315 		return 1;
316 
317 	if (s->ctor)
318 		return 1;
319 
320 	if (s->usersize)
321 		return 1;
322 
323 	/*
324 	 * We may have set a slab to be unmergeable during bootstrap.
325 	 */
326 	if (s->refcount < 0)
327 		return 1;
328 
329 	return 0;
330 }
331 
332 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
333 		slab_flags_t flags, const char *name, void (*ctor)(void *))
334 {
335 	struct kmem_cache *s;
336 
337 	if (slab_nomerge)
338 		return NULL;
339 
340 	if (ctor)
341 		return NULL;
342 
343 	size = ALIGN(size, sizeof(void *));
344 	align = calculate_alignment(flags, align, size);
345 	size = ALIGN(size, align);
346 	flags = kmem_cache_flags(size, flags, name, NULL);
347 
348 	if (flags & SLAB_NEVER_MERGE)
349 		return NULL;
350 
351 	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
352 		if (slab_unmergeable(s))
353 			continue;
354 
355 		if (size > s->size)
356 			continue;
357 
358 		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
359 			continue;
360 		/*
361 		 * Check if alignment is compatible.
362 		 * Courtesy of Adrian Drzewiecki
363 		 */
364 		if ((s->size & ~(align - 1)) != s->size)
365 			continue;
366 
367 		if (s->size - size >= sizeof(void *))
368 			continue;
369 
370 		if (IS_ENABLED(CONFIG_SLAB) && align &&
371 			(align > s->align || s->align % align))
372 			continue;
373 
374 		return s;
375 	}
376 	return NULL;
377 }
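
/*
 * Illustrative note (not part of the original file): with merging enabled
 * (CONFIG_SLAB_MERGE_DEFAULT=y and no "slab_nomerge" on the command line),
 * a request for e.g. a 96 byte cache with no constructor, no usercopy region
 * and compatible flags may be satisfied by aliasing an existing cache of a
 * suitable size instead of creating a new one, so several callers end up
 * sharing a single cache in /proc/slabinfo.
 */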
378 
379 static struct kmem_cache *create_cache(const char *name,
380 		unsigned int object_size, unsigned int align,
381 		slab_flags_t flags, unsigned int useroffset,
382 		unsigned int usersize, void (*ctor)(void *),
383 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
384 {
385 	struct kmem_cache *s;
386 	int err;
387 
388 	if (WARN_ON(useroffset + usersize > object_size))
389 		useroffset = usersize = 0;
390 
391 	err = -ENOMEM;
392 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
393 	if (!s)
394 		goto out;
395 
396 	s->name = name;
397 	s->size = s->object_size = object_size;
398 	s->align = align;
399 	s->ctor = ctor;
400 	s->useroffset = useroffset;
401 	s->usersize = usersize;
402 
403 	err = init_memcg_params(s, root_cache);
404 	if (err)
405 		goto out_free_cache;
406 
407 	err = __kmem_cache_create(s, flags);
408 	if (err)
409 		goto out_free_cache;
410 
411 	s->refcount = 1;
412 	list_add(&s->list, &slab_caches);
413 	memcg_link_cache(s, memcg);
414 out:
415 	if (err)
416 		return ERR_PTR(err);
417 	return s;
418 
419 out_free_cache:
420 	destroy_memcg_params(s);
421 	kmem_cache_free(kmem_cache, s);
422 	goto out;
423 }
424 
425 /**
426  * kmem_cache_create_usercopy - Create a cache with a region suitable
427  * for copying to userspace
428  * @name: A string which is used in /proc/slabinfo to identify this cache.
429  * @size: The size of objects to be created in this cache.
430  * @align: The required alignment for the objects.
431  * @flags: SLAB flags
432  * @useroffset: Usercopy region offset
433  * @usersize: Usercopy region size
434  * @ctor: A constructor for the objects.
435  *
436  * Cannot be called within an interrupt, but can be interrupted.
437  * The @ctor is run when new pages are allocated by the cache.
438  *
439  * The flags are
440  *
441  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
442  * to catch references to uninitialised memory.
443  *
444  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
445  * for buffer overruns.
446  *
447  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
448  * cacheline.  This can be beneficial if you're counting cycles as closely
449  * as davem.
450  *
451  * Return: a pointer to the cache on success, NULL on failure.
452  */
453 struct kmem_cache *
454 kmem_cache_create_usercopy(const char *name,
455 		  unsigned int size, unsigned int align,
456 		  slab_flags_t flags,
457 		  unsigned int useroffset, unsigned int usersize,
458 		  void (*ctor)(void *))
459 {
460 	struct kmem_cache *s = NULL;
461 	const char *cache_name;
462 	int err;
463 
464 	get_online_cpus();
465 	get_online_mems();
466 	memcg_get_cache_ids();
467 
468 	mutex_lock(&slab_mutex);
469 
470 	err = kmem_cache_sanity_check(name, size);
471 	if (err) {
472 		goto out_unlock;
473 	}
474 
475 	/* Refuse requests with allocator specific flags */
476 	if (flags & ~SLAB_FLAGS_PERMITTED) {
477 		err = -EINVAL;
478 		goto out_unlock;
479 	}
480 
481 	/*
482 	 * Some allocators will constrain the set of valid flags to a subset
483 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
484 	 * case, and we'll just provide them with a sanitized version of the
485 	 * passed flags.
486 	 */
487 	flags &= CACHE_CREATE_MASK;
488 
489 	/* Fail closed on bad usersize or useroffset values. */
490 	if (WARN_ON(!usersize && useroffset) ||
491 	    WARN_ON(size < usersize || size - usersize < useroffset))
492 		usersize = useroffset = 0;
493 
494 	if (!usersize)
495 		s = __kmem_cache_alias(name, size, align, flags, ctor);
496 	if (s)
497 		goto out_unlock;
498 
499 	cache_name = kstrdup_const(name, GFP_KERNEL);
500 	if (!cache_name) {
501 		err = -ENOMEM;
502 		goto out_unlock;
503 	}
504 
505 	s = create_cache(cache_name, size,
506 			 calculate_alignment(flags, align, size),
507 			 flags, useroffset, usersize, ctor, NULL, NULL);
508 	if (IS_ERR(s)) {
509 		err = PTR_ERR(s);
510 		kfree_const(cache_name);
511 	}
512 
513 out_unlock:
514 	mutex_unlock(&slab_mutex);
515 
516 	memcg_put_cache_ids();
517 	put_online_mems();
518 	put_online_cpus();
519 
520 	if (err) {
521 		if (flags & SLAB_PANIC)
522 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
523 				name, err);
524 		else {
525 			pr_warn("kmem_cache_create(%s) failed with error %d\n",
526 				name, err);
527 			dump_stack();
528 		}
529 		return NULL;
530 	}
531 	return s;
532 }
533 EXPORT_SYMBOL(kmem_cache_create_usercopy);
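
/*
 * Illustrative sketch (not part of the original file; "struct foo" and its
 * "buf" member are hypothetical): a cache whose objects may only be copied
 * to/from user space within their buf region could be created as
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
 *				SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, buf),
 *				sizeof_field(struct foo, buf), NULL);
 *
 * With CONFIG_HARDENED_USERCOPY, user copies touching object memory outside
 * [useroffset, useroffset + usersize) are rejected (or only warned about when
 * the usercopy_fallback parameter near the top of this file is set).
 */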
534 
535 /**
536  * kmem_cache_create - Create a cache.
537  * @name: A string which is used in /proc/slabinfo to identify this cache.
538  * @size: The size of objects to be created in this cache.
539  * @align: The required alignment for the objects.
540  * @flags: SLAB flags
541  * @ctor: A constructor for the objects.
542  *
543  * Cannot be called within an interrupt, but can be interrupted.
544  * The @ctor is run when new pages are allocated by the cache.
545  *
546  * The flags are
547  *
548  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
549  * to catch references to uninitialised memory.
550  *
551  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
552  * for buffer overruns.
553  *
554  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
555  * cacheline.  This can be beneficial if you're counting cycles as closely
556  * as davem.
557  *
558  * Return: a pointer to the cache on success, NULL on failure.
559  */
560 struct kmem_cache *
561 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
562 		slab_flags_t flags, void (*ctor)(void *))
563 {
564 	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
565 					  ctor);
566 }
567 EXPORT_SYMBOL(kmem_cache_create);
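
/*
 * Illustrative lifecycle sketch (not part of the original file; the names are
 * hypothetical):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */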
568 
569 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
570 {
571 	LIST_HEAD(to_destroy);
572 	struct kmem_cache *s, *s2;
573 
574 	/*
575 	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
576 	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
577 	 * through RCU and the associated kmem_caches are dereferenced
578 	 * while freeing the pages, so the kmem_caches should be freed only
579 	 * after the pending RCU operations are finished.  As rcu_barrier()
580 	 * is a pretty slow operation, we batch all pending destructions
581 	 * asynchronously.
582 	 */
583 	mutex_lock(&slab_mutex);
584 	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
585 	mutex_unlock(&slab_mutex);
586 
587 	if (list_empty(&to_destroy))
588 		return;
589 
590 	rcu_barrier();
591 
592 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
593 #ifdef SLAB_SUPPORTS_SYSFS
594 		sysfs_slab_release(s);
595 #else
596 		slab_kmem_cache_release(s);
597 #endif
598 	}
599 }
600 
601 static int shutdown_cache(struct kmem_cache *s)
602 {
603 	/* free asan quarantined objects */
604 	kasan_cache_shutdown(s);
605 
606 	if (__kmem_cache_shutdown(s) != 0)
607 		return -EBUSY;
608 
609 	memcg_unlink_cache(s);
610 	list_del(&s->list);
611 
612 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
613 #ifdef SLAB_SUPPORTS_SYSFS
614 		sysfs_slab_unlink(s);
615 #endif
616 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
617 		schedule_work(&slab_caches_to_rcu_destroy_work);
618 	} else {
619 #ifdef SLAB_SUPPORTS_SYSFS
620 		sysfs_slab_unlink(s);
621 		sysfs_slab_release(s);
622 #else
623 		slab_kmem_cache_release(s);
624 #endif
625 	}
626 
627 	return 0;
628 }
629 
630 #ifdef CONFIG_MEMCG_KMEM
631 /*
632  * memcg_create_kmem_cache - Create a cache for a memory cgroup.
633  * @memcg: The memory cgroup the new cache is for.
634  * @root_cache: The parent of the new cache.
635  *
636  * This function attempts to create a kmem cache that will serve allocation
637  * requests going from @memcg to @root_cache. The new cache inherits properties
638  * from its parent.
639  */
640 void memcg_create_kmem_cache(struct mem_cgroup *memcg,
641 			     struct kmem_cache *root_cache)
642 {
643 	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
644 	struct cgroup_subsys_state *css = &memcg->css;
645 	struct memcg_cache_array *arr;
646 	struct kmem_cache *s = NULL;
647 	char *cache_name;
648 	int idx;
649 
650 	get_online_cpus();
651 	get_online_mems();
652 
653 	mutex_lock(&slab_mutex);
654 
655 	/*
656 	 * The memory cgroup could have been offlined while the cache
657 	 * creation work was pending.
658 	 */
659 	if (memcg->kmem_state != KMEM_ONLINE)
660 		goto out_unlock;
661 
662 	idx = memcg_cache_id(memcg);
663 	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
664 					lockdep_is_held(&slab_mutex));
665 
666 	/*
667 	 * Since per-memcg caches are created asynchronously on first
668 	 * allocation (see memcg_kmem_get_cache()), several threads can try to
669 	 * create the same cache, but only one of them may succeed.
670 	 */
671 	if (arr->entries[idx])
672 		goto out_unlock;
673 
674 	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
675 	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
676 			       css->serial_nr, memcg_name_buf);
677 	if (!cache_name)
678 		goto out_unlock;
679 
680 	s = create_cache(cache_name, root_cache->object_size,
681 			 root_cache->align,
682 			 root_cache->flags & CACHE_CREATE_MASK,
683 			 root_cache->useroffset, root_cache->usersize,
684 			 root_cache->ctor, memcg, root_cache);
685 	/*
686 	 * If we could not create a memcg cache, do not complain, because
687 	 * that's not critical at all as we can always proceed with the root
688 	 * cache.
689 	 */
690 	if (IS_ERR(s)) {
691 		kfree(cache_name);
692 		goto out_unlock;
693 	}
694 
695 	/*
696 	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
697 	 * barrier here to ensure nobody will see the kmem_cache partially
698 	 * initialized.
699 	 */
700 	smp_wmb();
701 	arr->entries[idx] = s;
702 
703 out_unlock:
704 	mutex_unlock(&slab_mutex);
705 
706 	put_online_mems();
707 	put_online_cpus();
708 }
709 
710 static void kmemcg_workfn(struct work_struct *work)
711 {
712 	struct kmem_cache *s = container_of(work, struct kmem_cache,
713 					    memcg_params.work);
714 
715 	get_online_cpus();
716 	get_online_mems();
717 
718 	mutex_lock(&slab_mutex);
719 	s->memcg_params.work_fn(s);
720 	mutex_unlock(&slab_mutex);
721 
722 	put_online_mems();
723 	put_online_cpus();
724 }
725 
726 static void kmemcg_rcufn(struct rcu_head *head)
727 {
728 	struct kmem_cache *s = container_of(head, struct kmem_cache,
729 					    memcg_params.rcu_head);
730 
731 	/*
732 	 * We need to grab blocking locks.  Bounce to ->work.  The
733 	 * work item shares the space with the RCU head and can't be
734 	 * initialized earlier.
735 	 */
736 	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
737 	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
738 }
739 
740 static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
741 {
742 	WARN_ON(shutdown_cache(s));
743 }
744 
745 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
746 {
747 	struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
748 					    memcg_params.refcnt);
749 	unsigned long flags;
750 
751 	spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
752 	if (s->memcg_params.root_cache->memcg_params.dying)
753 		goto unlock;
754 
755 	s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
756 	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
757 	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
758 
759 unlock:
760 	spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
761 }
762 
763 static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
764 {
765 	__kmemcg_cache_deactivate_after_rcu(s);
766 	percpu_ref_kill(&s->memcg_params.refcnt);
767 }
768 
769 static void kmemcg_cache_deactivate(struct kmem_cache *s)
770 {
771 	if (WARN_ON_ONCE(is_root_cache(s)))
772 		return;
773 
774 	__kmemcg_cache_deactivate(s);
775 	s->flags |= SLAB_DEACTIVATED;
776 
777 	/*
778 	 * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
779 	 * flag and make sure that no new kmem_cache deactivation tasks
780 	 * are queued (see flush_memcg_workqueue()).
781 	 */
782 	spin_lock_irq(&memcg_kmem_wq_lock);
783 	if (s->memcg_params.root_cache->memcg_params.dying)
784 		goto unlock;
785 
786 	s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
787 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
788 unlock:
789 	spin_unlock_irq(&memcg_kmem_wq_lock);
790 }
791 
792 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
793 				  struct mem_cgroup *parent)
794 {
795 	int idx;
796 	struct memcg_cache_array *arr;
797 	struct kmem_cache *s, *c;
798 	unsigned int nr_reparented;
799 
800 	idx = memcg_cache_id(memcg);
801 
802 	get_online_cpus();
803 	get_online_mems();
804 
805 	mutex_lock(&slab_mutex);
806 	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
807 		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
808 						lockdep_is_held(&slab_mutex));
809 		c = arr->entries[idx];
810 		if (!c)
811 			continue;
812 
813 		kmemcg_cache_deactivate(c);
814 		arr->entries[idx] = NULL;
815 	}
816 	nr_reparented = 0;
817 	list_for_each_entry(s, &memcg->kmem_caches,
818 			    memcg_params.kmem_caches_node) {
819 		WRITE_ONCE(s->memcg_params.memcg, parent);
820 		css_put(&memcg->css);
821 		nr_reparented++;
822 	}
823 	if (nr_reparented) {
824 		list_splice_init(&memcg->kmem_caches,
825 				 &parent->kmem_caches);
826 		css_get_many(&parent->css, nr_reparented);
827 	}
828 	mutex_unlock(&slab_mutex);
829 
830 	put_online_mems();
831 	put_online_cpus();
832 }
833 
834 static int shutdown_memcg_caches(struct kmem_cache *s)
835 {
836 	struct memcg_cache_array *arr;
837 	struct kmem_cache *c, *c2;
838 	LIST_HEAD(busy);
839 	int i;
840 
841 	BUG_ON(!is_root_cache(s));
842 
843 	/*
844 	 * First, shut down active caches, i.e. caches that belong to online
845 	 * memory cgroups.
846 	 */
847 	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
848 					lockdep_is_held(&slab_mutex));
849 	for_each_memcg_cache_index(i) {
850 		c = arr->entries[i];
851 		if (!c)
852 			continue;
853 		if (shutdown_cache(c))
854 			/*
855 			 * The cache still has objects. Move it to a temporary
856 			 * list so as not to try to destroy it for a second
857 			 * time while iterating over inactive caches below.
858 			 */
859 			list_move(&c->memcg_params.children_node, &busy);
860 		else
861 			/*
862 			 * The cache is empty and will be destroyed soon. Clear
863 			 * the pointer to it in the memcg_caches array so that
864 			 * it will never be accessed even if the root cache
865 			 * stays alive.
866 			 */
867 			arr->entries[i] = NULL;
868 	}
869 
870 	/*
871 	 * Second, shut down all caches left from memory cgroups that are now
872 	 * offline.
873 	 */
874 	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
875 				 memcg_params.children_node)
876 		shutdown_cache(c);
877 
878 	list_splice(&busy, &s->memcg_params.children);
879 
880 	/*
881 	 * A cache being destroyed must be empty. In particular, this means
882 	 * that all per memcg caches attached to it must be empty too.
883 	 */
884 	if (!list_empty(&s->memcg_params.children))
885 		return -EBUSY;
886 	return 0;
887 }
888 
889 static void flush_memcg_workqueue(struct kmem_cache *s)
890 {
891 	spin_lock_irq(&memcg_kmem_wq_lock);
892 	s->memcg_params.dying = true;
893 	spin_unlock_irq(&memcg_kmem_wq_lock);
894 
895 	/*
896 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
897 	 * sure all registered rcu callbacks have been invoked.
898 	 */
899 	rcu_barrier();
900 
901 	/*
902 	 * SLAB and SLUB create memcg kmem_caches through a workqueue, and SLUB
903 	 * also deactivates memcg kmem_caches through a workqueue. Make sure all
904 	 * previously queued work items have been processed.
905 	 */
906 	if (likely(memcg_kmem_cache_wq))
907 		flush_workqueue(memcg_kmem_cache_wq);
908 
909 	/*
910 	 * If we're racing with children kmem_cache deactivation, it might
911 	 * take another rcu grace period to complete their destruction.
912 	 * At this moment the corresponding percpu_ref_kill() call should be
913 	 * done, but it might take another rcu grace period to complete
914 	 * switching to the atomic mode.
915 	 * Please note that we check without grabbing the slab_mutex. It's safe
916 	 * because at this moment the children list can't grow.
917 	 */
918 	if (!list_empty(&s->memcg_params.children))
919 		rcu_barrier();
920 }
921 #else
922 static inline int shutdown_memcg_caches(struct kmem_cache *s)
923 {
924 	return 0;
925 }
926 
927 static inline void flush_memcg_workqueue(struct kmem_cache *s)
928 {
929 }
930 #endif /* CONFIG_MEMCG_KMEM */
931 
932 void slab_kmem_cache_release(struct kmem_cache *s)
933 {
934 	__kmem_cache_release(s);
935 	destroy_memcg_params(s);
936 	kfree_const(s->name);
937 	kmem_cache_free(kmem_cache, s);
938 }
939 
940 void kmem_cache_destroy(struct kmem_cache *s)
941 {
942 	int err;
943 
944 	if (unlikely(!s))
945 		return;
946 
947 	flush_memcg_workqueue(s);
948 
949 	get_online_cpus();
950 	get_online_mems();
951 
952 	mutex_lock(&slab_mutex);
953 
954 	s->refcount--;
955 	if (s->refcount)
956 		goto out_unlock;
957 
958 	err = shutdown_memcg_caches(s);
959 	if (!err)
960 		err = shutdown_cache(s);
961 
962 	if (err) {
963 		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
964 		       s->name);
965 		dump_stack();
966 	}
967 out_unlock:
968 	mutex_unlock(&slab_mutex);
969 
970 	put_online_mems();
971 	put_online_cpus();
972 }
973 EXPORT_SYMBOL(kmem_cache_destroy);
974 
975 /**
976  * kmem_cache_shrink - Shrink a cache.
977  * @cachep: The cache to shrink.
978  *
979  * Releases as many slabs as possible for a cache.
980  * To help debugging, a zero exit status indicates all slabs were released.
981  *
982  * Return: %0 if all slabs were released, non-zero otherwise
983  */
984 int kmem_cache_shrink(struct kmem_cache *cachep)
985 {
986 	int ret;
987 
988 	get_online_cpus();
989 	get_online_mems();
990 	kasan_cache_shrink(cachep);
991 	ret = __kmem_cache_shrink(cachep);
992 	put_online_mems();
993 	put_online_cpus();
994 	return ret;
995 }
996 EXPORT_SYMBOL(kmem_cache_shrink);
997 
998 /**
999  * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
1000  * @s: The cache pointer
1001  */
1002 void kmem_cache_shrink_all(struct kmem_cache *s)
1003 {
1004 	struct kmem_cache *c;
1005 
1006 	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
1007 		kmem_cache_shrink(s);
1008 		return;
1009 	}
1010 
1011 	get_online_cpus();
1012 	get_online_mems();
1013 	kasan_cache_shrink(s);
1014 	__kmem_cache_shrink(s);
1015 
1016 	/*
1017 	 * We have to take the slab_mutex to protect from the memcg list
1018 	 * modification.
1019 	 */
1020 	mutex_lock(&slab_mutex);
1021 	for_each_memcg_cache(c, s) {
1022 		/*
1023 		 * Don't need to shrink deactivated memcg caches.
1024 		 */
1025 		if (c->flags & SLAB_DEACTIVATED)
1026 			continue;
1027 		kasan_cache_shrink(c);
1028 		__kmem_cache_shrink(c);
1029 	}
1030 	mutex_unlock(&slab_mutex);
1031 	put_online_mems();
1032 	put_online_cpus();
1033 }
1034 
1035 bool slab_is_available(void)
1036 {
1037 	return slab_state >= UP;
1038 }
1039 
1040 #ifndef CONFIG_SLOB
1041 /* Create a cache during boot when no slab services are available yet */
1042 void __init create_boot_cache(struct kmem_cache *s, const char *name,
1043 		unsigned int size, slab_flags_t flags,
1044 		unsigned int useroffset, unsigned int usersize)
1045 {
1046 	int err;
1047 	unsigned int align = ARCH_KMALLOC_MINALIGN;
1048 
1049 	s->name = name;
1050 	s->size = s->object_size = size;
1051 
1052 	/*
1053 	 * For power of two sizes, guarantee natural alignment for kmalloc
1054 	 * caches, regardless of SL*B debugging options.
1055 	 */
1056 	if (is_power_of_2(size))
1057 		align = max(align, size);
1058 	s->align = calculate_alignment(flags, align, size);
1059 
1060 	s->useroffset = useroffset;
1061 	s->usersize = usersize;
1062 
1063 	slab_init_memcg_params(s);
1064 
1065 	err = __kmem_cache_create(s, flags);
1066 
1067 	if (err)
1068 		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
1069 					name, size, err);
1070 
1071 	s->refcount = -1;	/* Exempt from merging for now */
1072 }
1073 
1074 struct kmem_cache *__init create_kmalloc_cache(const char *name,
1075 		unsigned int size, slab_flags_t flags,
1076 		unsigned int useroffset, unsigned int usersize)
1077 {
1078 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
1079 
1080 	if (!s)
1081 		panic("Out of memory when creating slab %s\n", name);
1082 
1083 	create_boot_cache(s, name, size, flags, useroffset, usersize);
1084 	list_add(&s->list, &slab_caches);
1085 	memcg_link_cache(s, NULL);
1086 	s->refcount = 1;
1087 	return s;
1088 }
1089 
1090 struct kmem_cache *
1091 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
1092 { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
1093 EXPORT_SYMBOL(kmalloc_caches);
1094 
1095 /*
1096  * Conversion table for small slab sizes / 8 to the index in the
1097  * kmalloc array. This is necessary for slabs < 192 since we have non power
1098  * of two cache sizes there. The size of larger slabs can be determined using
1099  * fls.
1100  */
1101 static u8 size_index[24] __ro_after_init = {
1102 	3,	/* 8 */
1103 	4,	/* 16 */
1104 	5,	/* 24 */
1105 	5,	/* 32 */
1106 	6,	/* 40 */
1107 	6,	/* 48 */
1108 	6,	/* 56 */
1109 	6,	/* 64 */
1110 	1,	/* 72 */
1111 	1,	/* 80 */
1112 	1,	/* 88 */
1113 	1,	/* 96 */
1114 	7,	/* 104 */
1115 	7,	/* 112 */
1116 	7,	/* 120 */
1117 	7,	/* 128 */
1118 	2,	/* 136 */
1119 	2,	/* 144 */
1120 	2,	/* 152 */
1121 	2,	/* 160 */
1122 	2,	/* 168 */
1123 	2,	/* 176 */
1124 	2,	/* 184 */
1125 	2	/* 192 */
1126 };
1127 
1128 static inline unsigned int size_index_elem(unsigned int bytes)
1129 {
1130 	return (bytes - 1) / 8;
1131 }
1132 
1133 /*
1134  * Find the kmem_cache structure that serves a given size of
1135  * allocation
1136  */
1137 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
1138 {
1139 	unsigned int index;
1140 
1141 	if (size <= 192) {
1142 		if (!size)
1143 			return ZERO_SIZE_PTR;
1144 
1145 		index = size_index[size_index_elem(size)];
1146 	} else {
1147 		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
1148 			return NULL;
1149 		index = fls(size - 1);
1150 	}
1151 
1152 	return kmalloc_caches[kmalloc_type(flags)][index];
1153 }
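
/*
 * Illustrative example (not part of the original file), assuming the default
 * 8 byte KMALLOC_MIN_SIZE: kmalloc(100, GFP_KERNEL) takes the table path,
 * size_index_elem(100) == 12 and size_index[12] == 7, i.e. the kmalloc-128
 * cache; kmalloc(300, GFP_KERNEL) takes the fls() path, fls(299) == 9, i.e.
 * the kmalloc-512 cache.
 */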
1154 
1155 #ifdef CONFIG_ZONE_DMA
1156 #define INIT_KMALLOC_INFO(__size, __short_size)			\
1157 {								\
1158 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
1159 	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
1160 	.name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,	\
1161 	.size = __size,						\
1162 }
1163 #else
1164 #define INIT_KMALLOC_INFO(__size, __short_size)			\
1165 {								\
1166 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
1167 	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
1168 	.size = __size,						\
1169 }
1170 #endif
1171 
1172 /*
1173  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
1174  * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
1175  * kmalloc-67108864.
1176  */
1177 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
1178 	INIT_KMALLOC_INFO(0, 0),
1179 	INIT_KMALLOC_INFO(96, 96),
1180 	INIT_KMALLOC_INFO(192, 192),
1181 	INIT_KMALLOC_INFO(8, 8),
1182 	INIT_KMALLOC_INFO(16, 16),
1183 	INIT_KMALLOC_INFO(32, 32),
1184 	INIT_KMALLOC_INFO(64, 64),
1185 	INIT_KMALLOC_INFO(128, 128),
1186 	INIT_KMALLOC_INFO(256, 256),
1187 	INIT_KMALLOC_INFO(512, 512),
1188 	INIT_KMALLOC_INFO(1024, 1k),
1189 	INIT_KMALLOC_INFO(2048, 2k),
1190 	INIT_KMALLOC_INFO(4096, 4k),
1191 	INIT_KMALLOC_INFO(8192, 8k),
1192 	INIT_KMALLOC_INFO(16384, 16k),
1193 	INIT_KMALLOC_INFO(32768, 32k),
1194 	INIT_KMALLOC_INFO(65536, 64k),
1195 	INIT_KMALLOC_INFO(131072, 128k),
1196 	INIT_KMALLOC_INFO(262144, 256k),
1197 	INIT_KMALLOC_INFO(524288, 512k),
1198 	INIT_KMALLOC_INFO(1048576, 1M),
1199 	INIT_KMALLOC_INFO(2097152, 2M),
1200 	INIT_KMALLOC_INFO(4194304, 4M),
1201 	INIT_KMALLOC_INFO(8388608, 8M),
1202 	INIT_KMALLOC_INFO(16777216, 16M),
1203 	INIT_KMALLOC_INFO(33554432, 32M),
1204 	INIT_KMALLOC_INFO(67108864, 64M)
1205 };
1206 
1207 /*
1208  * Patch up the size_index table if we have strange large alignment
1209  * requirements for the kmalloc array. This is only the case for
1210  * MIPS it seems. The standard arches will not generate any code here.
1211  *
1212  * Largest permitted alignment is 256 bytes due to the way we
1213  * handle the index determination for the smaller caches.
1214  *
1215  * Make sure that nothing crazy happens if someone starts tinkering
1216  * around with ARCH_KMALLOC_MINALIGN
1217  */
1218 void __init setup_kmalloc_cache_index_table(void)
1219 {
1220 	unsigned int i;
1221 
1222 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
1223 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
1224 
1225 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
1226 		unsigned int elem = size_index_elem(i);
1227 
1228 		if (elem >= ARRAY_SIZE(size_index))
1229 			break;
1230 		size_index[elem] = KMALLOC_SHIFT_LOW;
1231 	}
1232 
1233 	if (KMALLOC_MIN_SIZE >= 64) {
1234 		/*
1235 		 * The 96 byte sized cache is not used if the alignment
1236 		 * is 64 bytes.
1237 		 */
1238 		for (i = 64 + 8; i <= 96; i += 8)
1239 			size_index[size_index_elem(i)] = 7;
1240 
1241 	}
1242 
1243 	if (KMALLOC_MIN_SIZE >= 128) {
1244 		/*
1245 		 * The 192 byte sized cache is not used if the alignment
1246 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
1247 		 * instead.
1248 		 */
1249 		for (i = 128 + 8; i <= 192; i += 8)
1250 			size_index[size_index_elem(i)] = 8;
1251 	}
1252 }
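
/*
 * Illustrative example (not part of the original file): on an architecture
 * where KMALLOC_MIN_SIZE is 64 (KMALLOC_SHIFT_LOW == 6), the code above
 * redirects the 8..56 byte entries to the kmalloc-64 cache and the 72..96
 * byte entries to index 7 (kmalloc-128), since neither the smaller caches
 * nor the 96 byte cache exist with that minimum size.
 */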
1253 
1254 static void __init
1255 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
1256 {
1257 	if (type == KMALLOC_RECLAIM)
1258 		flags |= SLAB_RECLAIM_ACCOUNT;
1259 
1260 	kmalloc_caches[type][idx] = create_kmalloc_cache(
1261 					kmalloc_info[idx].name[type],
1262 					kmalloc_info[idx].size, flags, 0,
1263 					kmalloc_info[idx].size);
1264 }
1265 
1266 /*
1267  * Create the kmalloc array. Some of the regular kmalloc arrays
1268  * may already have been created because they were needed to
1269  * enable allocations for slab creation.
1270  */
1271 void __init create_kmalloc_caches(slab_flags_t flags)
1272 {
1273 	int i;
1274 	enum kmalloc_cache_type type;
1275 
1276 	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
1277 		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
1278 			if (!kmalloc_caches[type][i])
1279 				new_kmalloc_cache(i, type, flags);
1280 
1281 			/*
1282 			 * Caches that are not of a power-of-two size. These
1283 			 * have to be created immediately after the earlier
1284 			 * power-of-two caches.
1285 			 */
1286 			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
1287 					!kmalloc_caches[type][1])
1288 				new_kmalloc_cache(1, type, flags);
1289 			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
1290 					!kmalloc_caches[type][2])
1291 				new_kmalloc_cache(2, type, flags);
1292 		}
1293 	}
1294 
1295 	/* Kmalloc array is now usable */
1296 	slab_state = UP;
1297 
1298 #ifdef CONFIG_ZONE_DMA
1299 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
1300 		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
1301 
1302 		if (s) {
1303 			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
1304 				kmalloc_info[i].name[KMALLOC_DMA],
1305 				kmalloc_info[i].size,
1306 				SLAB_CACHE_DMA | flags, 0,
1307 				kmalloc_info[i].size);
1308 		}
1309 	}
1310 #endif
1311 }
1312 #endif /* !CONFIG_SLOB */
1313 
1314 /*
1315  * To avoid unnecessary overhead, we pass through large allocation requests
1316  * directly to the page allocator. We use __GFP_COMP, because we will need to
1317  * know the allocation order to free the pages properly in kfree.
1318  */
1319 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1320 {
1321 	void *ret = NULL;
1322 	struct page *page;
1323 
1324 	flags |= __GFP_COMP;
1325 	page = alloc_pages(flags, order);
1326 	if (likely(page)) {
1327 		ret = page_address(page);
1328 		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
1329 				    1 << order);
1330 	}
1331 	ret = kasan_kmalloc_large(ret, size, flags);
1332 	/* As ret might get tagged, call kmemleak hook after KASAN. */
1333 	kmemleak_alloc(ret, size, 1, flags);
1334 	return ret;
1335 }
1336 EXPORT_SYMBOL(kmalloc_order);
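
/*
 * Illustrative note (not part of the original file): with SLUB, requests
 * larger than KMALLOC_MAX_CACHE_SIZE (two pages) end up here; e.g. on a 4K
 * page system a 100 KB kmalloc() becomes an order-5 (128 KB) page allocation
 * instead of a slab allocation.
 */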
1337 
1338 #ifdef CONFIG_TRACING
1339 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1340 {
1341 	void *ret = kmalloc_order(size, flags, order);
1342 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1343 	return ret;
1344 }
1345 EXPORT_SYMBOL(kmalloc_order_trace);
1346 #endif
1347 
1348 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1349 /* Randomize a generic freelist */
1350 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
1351 			       unsigned int count)
1352 {
1353 	unsigned int rand;
1354 	unsigned int i;
1355 
1356 	for (i = 0; i < count; i++)
1357 		list[i] = i;
1358 
1359 	/* Fisher-Yates shuffle */
1360 	for (i = count - 1; i > 0; i--) {
1361 		rand = prandom_u32_state(state);
1362 		rand %= (i + 1);
1363 		swap(list[i], list[rand]);
1364 	}
1365 }
1366 
1367 /* Create a random sequence per cache */
1368 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1369 				    gfp_t gfp)
1370 {
1371 	struct rnd_state state;
1372 
1373 	if (count < 2 || cachep->random_seq)
1374 		return 0;
1375 
1376 	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1377 	if (!cachep->random_seq)
1378 		return -ENOMEM;
1379 
1380 	/* Get best entropy at this stage of boot */
1381 	prandom_seed_state(&state, get_random_long());
1382 
1383 	freelist_randomize(&state, cachep->random_seq, count);
1384 	return 0;
1385 }
1386 
1387 /* Destroy the per-cache random freelist sequence */
1388 void cache_random_seq_destroy(struct kmem_cache *cachep)
1389 {
1390 	kfree(cachep->random_seq);
1391 	cachep->random_seq = NULL;
1392 }
1393 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1394 
1395 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
1396 #ifdef CONFIG_SLAB
1397 #define SLABINFO_RIGHTS (0600)
1398 #else
1399 #define SLABINFO_RIGHTS (0400)
1400 #endif
1401 
1402 static void print_slabinfo_header(struct seq_file *m)
1403 {
1404 	/*
1405 	 * Output format version, so at least we can change it
1406 	 * without _too_ many complaints.
1407 	 */
1408 #ifdef CONFIG_DEBUG_SLAB
1409 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1410 #else
1411 	seq_puts(m, "slabinfo - version: 2.1\n");
1412 #endif
1413 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1414 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1415 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1416 #ifdef CONFIG_DEBUG_SLAB
1417 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1418 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1419 #endif
1420 	seq_putc(m, '\n');
1421 }
1422 
1423 void *slab_start(struct seq_file *m, loff_t *pos)
1424 {
1425 	mutex_lock(&slab_mutex);
1426 	return seq_list_start(&slab_root_caches, *pos);
1427 }
1428 
1429 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1430 {
1431 	return seq_list_next(p, &slab_root_caches, pos);
1432 }
1433 
1434 void slab_stop(struct seq_file *m, void *p)
1435 {
1436 	mutex_unlock(&slab_mutex);
1437 }
1438 
1439 static void
1440 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1441 {
1442 	struct kmem_cache *c;
1443 	struct slabinfo sinfo;
1444 
1445 	if (!is_root_cache(s))
1446 		return;
1447 
1448 	for_each_memcg_cache(c, s) {
1449 		memset(&sinfo, 0, sizeof(sinfo));
1450 		get_slabinfo(c, &sinfo);
1451 
1452 		info->active_slabs += sinfo.active_slabs;
1453 		info->num_slabs += sinfo.num_slabs;
1454 		info->shared_avail += sinfo.shared_avail;
1455 		info->active_objs += sinfo.active_objs;
1456 		info->num_objs += sinfo.num_objs;
1457 	}
1458 }
1459 
1460 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1461 {
1462 	struct slabinfo sinfo;
1463 
1464 	memset(&sinfo, 0, sizeof(sinfo));
1465 	get_slabinfo(s, &sinfo);
1466 
1467 	memcg_accumulate_slabinfo(s, &sinfo);
1468 
1469 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1470 		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1471 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
1472 
1473 	seq_printf(m, " : tunables %4u %4u %4u",
1474 		   sinfo.limit, sinfo.batchcount, sinfo.shared);
1475 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
1476 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1477 	slabinfo_show_stats(m, s);
1478 	seq_putc(m, '\n');
1479 }
1480 
1481 static int slab_show(struct seq_file *m, void *p)
1482 {
1483 	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
1484 
1485 	if (p == slab_root_caches.next)
1486 		print_slabinfo_header(m);
1487 	cache_show(s, m);
1488 	return 0;
1489 }
1490 
1491 void dump_unreclaimable_slab(void)
1492 {
1493 	struct kmem_cache *s, *s2;
1494 	struct slabinfo sinfo;
1495 
1496 	/*
1497 	 * Here acquiring slab_mutex is risky since we don't want to sleep
1498 	 * in the oom path. But, without holding the mutex, traversing the
1499 	 * list may introduce a risk of crash.
1500 	 * Use mutex_trylock to protect the list traversal, and dump nothing
1501 	 * if the mutex cannot be acquired.
1502 	 */
1503 	if (!mutex_trylock(&slab_mutex)) {
1504 		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1505 		return;
1506 	}
1507 
1508 	pr_info("Unreclaimable slab info:\n");
1509 	pr_info("Name                      Used          Total\n");
1510 
1511 	list_for_each_entry_safe(s, s2, &slab_caches, list) {
1512 		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
1513 			continue;
1514 
1515 		get_slabinfo(s, &sinfo);
1516 
1517 		if (sinfo.num_objs > 0)
1518 			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
1519 				(sinfo.active_objs * s->size) / 1024,
1520 				(sinfo.num_objs * s->size) / 1024);
1521 	}
1522 	mutex_unlock(&slab_mutex);
1523 }
1524 
1525 #if defined(CONFIG_MEMCG_KMEM)
1526 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
1527 {
1528 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1529 
1530 	mutex_lock(&slab_mutex);
1531 	return seq_list_start(&memcg->kmem_caches, *pos);
1532 }
1533 
1534 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
1535 {
1536 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1537 
1538 	return seq_list_next(p, &memcg->kmem_caches, pos);
1539 }
1540 
1541 void memcg_slab_stop(struct seq_file *m, void *p)
1542 {
1543 	mutex_unlock(&slab_mutex);
1544 }
1545 
1546 int memcg_slab_show(struct seq_file *m, void *p)
1547 {
1548 	struct kmem_cache *s = list_entry(p, struct kmem_cache,
1549 					  memcg_params.kmem_caches_node);
1550 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1551 
1552 	if (p == memcg->kmem_caches.next)
1553 		print_slabinfo_header(m);
1554 	cache_show(s, m);
1555 	return 0;
1556 }
1557 #endif
1558 
1559 /*
1560  * slabinfo_op - iterator that generates /proc/slabinfo
1561  *
1562  * Output layout:
1563  * cache-name
1564  * num-active-objs
1565  * total-objs
1566  * object size
1567  * num-active-slabs
1568  * total-slabs
1569  * num-pages-per-slab
1570  * + further values on SMP and with statistics enabled
1571  */
1572 static const struct seq_operations slabinfo_op = {
1573 	.start = slab_start,
1574 	.next = slab_next,
1575 	.stop = slab_stop,
1576 	.show = slab_show,
1577 };
1578 
1579 static int slabinfo_open(struct inode *inode, struct file *file)
1580 {
1581 	return seq_open(file, &slabinfo_op);
1582 }
1583 
1584 static const struct proc_ops slabinfo_proc_ops = {
1585 	.proc_flags	= PROC_ENTRY_PERMANENT,
1586 	.proc_open	= slabinfo_open,
1587 	.proc_read	= seq_read,
1588 	.proc_write	= slabinfo_write,
1589 	.proc_lseek	= seq_lseek,
1590 	.proc_release	= seq_release,
1591 };
1592 
1593 static int __init slab_proc_init(void)
1594 {
1595 	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1596 	return 0;
1597 }
1598 module_init(slab_proc_init);
1599 
1600 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
1601 /*
1602  * Display information about kmem caches that have child memcg caches.
1603  */
1604 static int memcg_slabinfo_show(struct seq_file *m, void *unused)
1605 {
1606 	struct kmem_cache *s, *c;
1607 	struct slabinfo sinfo;
1608 
1609 	mutex_lock(&slab_mutex);
1610 	seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
1611 	seq_puts(m, " <active_slabs> <num_slabs>\n");
1612 	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
1613 		/*
1614 		 * Skip kmem caches that don't have any memcg children.
1615 		 */
1616 		if (list_empty(&s->memcg_params.children))
1617 			continue;
1618 
1619 		memset(&sinfo, 0, sizeof(sinfo));
1620 		get_slabinfo(s, &sinfo);
1621 		seq_printf(m, "%-17s root       %6lu %6lu %6lu %6lu\n",
1622 			   cache_name(s), sinfo.active_objs, sinfo.num_objs,
1623 			   sinfo.active_slabs, sinfo.num_slabs);
1624 
1625 		for_each_memcg_cache(c, s) {
1626 			struct cgroup_subsys_state *css;
1627 			char *status = "";
1628 
1629 			css = &c->memcg_params.memcg->css;
1630 			if (!(css->flags & CSS_ONLINE))
1631 				status = ":dead";
1632 			else if (c->flags & SLAB_DEACTIVATED)
1633 				status = ":deact";
1634 
1635 			memset(&sinfo, 0, sizeof(sinfo));
1636 			get_slabinfo(c, &sinfo);
1637 			seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
1638 				   cache_name(c), css->id, status,
1639 				   sinfo.active_objs, sinfo.num_objs,
1640 				   sinfo.active_slabs, sinfo.num_slabs);
1641 		}
1642 	}
1643 	mutex_unlock(&slab_mutex);
1644 	return 0;
1645 }
1646 DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
1647 
1648 static int __init memcg_slabinfo_init(void)
1649 {
1650 	debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
1651 			    NULL, NULL, &memcg_slabinfo_fops);
1652 	return 0;
1653 }
1654 
1655 late_initcall(memcg_slabinfo_init);
1656 #endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */
1657 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
1658 
1659 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1660 					   gfp_t flags)
1661 {
1662 	void *ret;
1663 	size_t ks = 0;
1664 
1665 	if (p)
1666 		ks = ksize(p);
1667 
1668 	if (ks >= new_size) {
1669 		p = kasan_krealloc((void *)p, new_size, flags);
1670 		return (void *)p;
1671 	}
1672 
1673 	ret = kmalloc_track_caller(new_size, flags);
1674 	if (ret && p)
1675 		memcpy(ret, p, ks);
1676 
1677 	return ret;
1678 }
1679 
1680 /**
1681  * krealloc - reallocate memory. The contents will remain unchanged.
1682  * @p: object to reallocate memory for.
1683  * @new_size: how many bytes of memory are required.
1684  * @flags: the type of memory to allocate.
1685  *
1686  * The contents of the object pointed to are preserved up to the
1687  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
1688  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
1689  * %NULL pointer, the object pointed to is freed.
1690  *
1691  * Return: pointer to the allocated memory or %NULL in case of error
1692  */
1693 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1694 {
1695 	void *ret;
1696 
1697 	if (unlikely(!new_size)) {
1698 		kfree(p);
1699 		return ZERO_SIZE_PTR;
1700 	}
1701 
1702 	ret = __do_krealloc(p, new_size, flags);
1703 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1704 		kfree(p);
1705 
1706 	return ret;
1707 }
1708 EXPORT_SYMBOL(krealloc);
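
/*
 * Illustrative sketch (not part of the original file): callers typically keep
 * the old pointer until krealloc() is known to have succeeded, because on
 * failure the original allocation is left untouched:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		goto err;	/* buf is still valid and still owned by us */
 *	buf = new;
 */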
1709 
1710 /**
1711  * kzfree - like kfree but zero memory
1712  * @p: object to free memory of
1713  *
1714  * The memory of the object that @p points to is zeroed before it is freed.
1715  * If @p is %NULL, kzfree() does nothing.
1716  *
1717  * Note: this function zeroes the whole allocated buffer which can be a good
1718  * deal bigger than the requested buffer size passed to kmalloc(). So be
1719  * careful when using this function in performance sensitive code.
1720  */
1721 void kzfree(const void *p)
1722 {
1723 	size_t ks;
1724 	void *mem = (void *)p;
1725 
1726 	if (unlikely(ZERO_OR_NULL_PTR(mem)))
1727 		return;
1728 	ks = ksize(mem);
1729 	memset(mem, 0, ks);
1730 	kfree(mem);
1731 }
1732 EXPORT_SYMBOL(kzfree);
1733 
1734 /**
1735  * ksize - get the actual amount of memory allocated for a given object
1736  * @objp: Pointer to the object
1737  *
1738  * kmalloc may internally round up allocations and return more memory
1739  * than requested. ksize() can be used to determine the actual amount of
1740  * memory allocated. The caller may use this additional memory, even though
1741  * a smaller amount of memory was initially specified with the kmalloc call.
1742  * The caller must guarantee that objp points to a valid object previously
1743  * allocated with either kmalloc() or kmem_cache_alloc(). The object
1744  * must not be freed during the duration of the call.
1745  *
1746  * Return: size of the actual memory used by @objp in bytes
1747  */
1748 size_t ksize(const void *objp)
1749 {
1750 	size_t size;
1751 
1752 	if (WARN_ON_ONCE(!objp))
1753 		return 0;
1754 	/*
1755 	 * We need to check that the pointed to object is valid, and only then
1756 	 * unpoison the shadow memory below. We use __kasan_check_read() to
1757 	 * generate a more useful report at the time ksize() is called (rather
1758 	 * than later where behaviour is undefined due to potential
1759 	 * use-after-free or double-free).
1760 	 *
1761 	 * If the pointed to memory is invalid we return 0, to avoid users of
1762 	 * ksize() writing to and potentially corrupting the memory region.
1763 	 *
1764 	 * We want to perform the check before __ksize(), to avoid potentially
1765 	 * crashing in __ksize() due to accessing invalid metadata.
1766 	 */
1767 	if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
1768 		return 0;
1769 
1770 	size = __ksize(objp);
1771 	/*
1772 	 * We assume that ksize callers could use the whole allocated area,
1773 	 * so we need to unpoison this area.
1774 	 */
1775 	kasan_unpoison_shadow(objp, size);
1776 	return size;
1777 }
1778 EXPORT_SYMBOL(ksize);
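
/*
 * Illustrative example (not part of the original file): kmalloc(13, ...) is
 * typically served from the kmalloc-16 cache, so ksize() on the returned
 * pointer reports 16 and the caller may use all 16 bytes even though only 13
 * were requested.
 */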
1779 
1780 /* Tracepoints definitions. */
1781 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1782 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1783 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1784 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1785 EXPORT_TRACEPOINT_SYMBOL(kfree);
1786 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1787 
1788 int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1789 {
1790 	if (__should_failslab(s, gfpflags))
1791 		return -ENOMEM;
1792 	return 0;
1793 }
1794 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
1795