xref: /openbmc/linux/mm/slab_common.c (revision adc286e6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Slab allocator functions that are independent of the allocator strategy
4  *
5  * (C) 2012 Christoph Lameter <cl@linux.com>
6  */
7 #include <linux/slab.h>
8 
9 #include <linux/mm.h>
10 #include <linux/poison.h>
11 #include <linux/interrupt.h>
12 #include <linux/memory.h>
13 #include <linux/cache.h>
14 #include <linux/compiler.h>
15 #include <linux/kfence.h>
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/uaccess.h>
19 #include <linux/seq_file.h>
20 #include <linux/proc_fs.h>
21 #include <linux/debugfs.h>
22 #include <linux/kasan.h>
23 #include <asm/cacheflush.h>
24 #include <asm/tlbflush.h>
25 #include <asm/page.h>
26 #include <linux/memcontrol.h>
27 
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/kmem.h>
30 
31 #include "internal.h"
32 
33 #include "slab.h"
34 
35 enum slab_state slab_state;
36 LIST_HEAD(slab_caches);
37 DEFINE_MUTEX(slab_mutex);
38 struct kmem_cache *kmem_cache;
39 
40 static LIST_HEAD(slab_caches_to_rcu_destroy);
41 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
42 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
43 		    slab_caches_to_rcu_destroy_workfn);
44 
45 /*
46  * Set of flags that will prevent slab merging
47  */
48 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
49 		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
50 		SLAB_FAILSLAB | kasan_never_merge())
51 
52 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
53 			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
54 
55 /*
56  * Merge control. If this is set then no merging of slab caches will occur.
57  */
58 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
59 
60 static int __init setup_slab_nomerge(char *str)
61 {
62 	slab_nomerge = true;
63 	return 1;
64 }
65 
66 static int __init setup_slab_merge(char *str)
67 {
68 	slab_nomerge = false;
69 	return 1;
70 }
71 
72 #ifdef CONFIG_SLUB
73 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
74 __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
75 #endif
76 
77 __setup("slab_nomerge", setup_slab_nomerge);
78 __setup("slab_merge", setup_slab_merge);
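/*
 * For reference, merging can also be controlled from the kernel command
 * line: booting with "slab_nomerge" (or "slub_nomerge" when SLUB is the
 * allocator) disables merging, and "slab_merge" re-enables it when the
 * kernel was built with merging off by default.
 */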
79 
80 /*
81  * Determine the size of a slab object
82  */
83 unsigned int kmem_cache_size(struct kmem_cache *s)
84 {
85 	return s->object_size;
86 }
87 EXPORT_SYMBOL(kmem_cache_size);
88 
89 #ifdef CONFIG_DEBUG_VM
90 static int kmem_cache_sanity_check(const char *name, unsigned int size)
91 {
92 	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
93 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
94 		return -EINVAL;
95 	}
96 
97 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
98 	return 0;
99 }
100 #else
101 static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
102 {
103 	return 0;
104 }
105 #endif
106 
107 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
108 {
109 	size_t i;
110 
111 	for (i = 0; i < nr; i++) {
112 		if (s)
113 			kmem_cache_free(s, p[i]);
114 		else
115 			kfree(p[i]);
116 	}
117 }
118 
119 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
120 								void **p)
121 {
122 	size_t i;
123 
124 	for (i = 0; i < nr; i++) {
125 		void *x = p[i] = kmem_cache_alloc(s, flags);
126 		if (!x) {
127 			__kmem_cache_free_bulk(s, i, p);
128 			return 0;
129 		}
130 	}
131 	return i;
132 }
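/*
 * Illustrative sketch of the public bulk API these fallbacks back
 * ("my_cachep" and "objs" are made-up names for the example);
 * kmem_cache_alloc_bulk() returns the number of objects allocated,
 * or 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cachep, GFP_KERNEL, 16, objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cachep, 16, objs);
 */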
133 
134 /*
135  * Figure out what the alignment of the objects will be given a set of
136  * flags, a user specified alignment and the size of the objects.
137  */
138 static unsigned int calculate_alignment(slab_flags_t flags,
139 		unsigned int align, unsigned int size)
140 {
141 	/*
142 	 * If the user wants hardware-cache-aligned objects, follow that
143 	 * suggestion if the object is sufficiently large.
144 	 *
145 	 * The hardware cache alignment cannot override the specified
146 	 * alignment, though. If the specified alignment is greater, use it.
147 	 */
148 	if (flags & SLAB_HWCACHE_ALIGN) {
149 		unsigned int ralign;
150 
151 		ralign = cache_line_size();
152 		while (size <= ralign / 2)
153 			ralign /= 2;
154 		align = max(align, ralign);
155 	}
156 
157 	align = max(align, arch_slab_minalign());
158 
159 	return ALIGN(align, sizeof(void *));
160 }
161 
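/*
 * Worked example (illustrative, assuming a 64-byte cache line, 8-byte
 * pointers and an architecture minimum slab alignment no larger than
 * that): for a 24-byte object created with SLAB_HWCACHE_ALIGN and no
 * explicit alignment, ralign starts at 64 and is halved while the object
 * still fits in half of it, ending at 32; the result is then rounded up
 * to a multiple of sizeof(void *), so the objects end up 32-byte aligned.
 */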
162 /*
163  * Determine whether a slab cache is allowed to be merged with others
164  */
165 int slab_unmergeable(struct kmem_cache *s)
166 {
167 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
168 		return 1;
169 
170 	if (s->ctor)
171 		return 1;
172 
173 	if (s->usersize)
174 		return 1;
175 
176 	/*
177 	 * We may have set a slab to be unmergeable during bootstrap.
178 	 */
179 	if (s->refcount < 0)
180 		return 1;
181 
182 	return 0;
183 }
184 
185 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
186 		slab_flags_t flags, const char *name, void (*ctor)(void *))
187 {
188 	struct kmem_cache *s;
189 
190 	if (slab_nomerge)
191 		return NULL;
192 
193 	if (ctor)
194 		return NULL;
195 
196 	size = ALIGN(size, sizeof(void *));
197 	align = calculate_alignment(flags, align, size);
198 	size = ALIGN(size, align);
199 	flags = kmem_cache_flags(size, flags, name);
200 
201 	if (flags & SLAB_NEVER_MERGE)
202 		return NULL;
203 
204 	list_for_each_entry_reverse(s, &slab_caches, list) {
205 		if (slab_unmergeable(s))
206 			continue;
207 
208 		if (size > s->size)
209 			continue;
210 
211 		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
212 			continue;
213 		/*
214 		 * Check if alignment is compatible.
215 		 * Courtesy of Adrian Drzewiecki
216 		 */
217 		if ((s->size & ~(align - 1)) != s->size)
218 			continue;
219 
220 		if (s->size - size >= sizeof(void *))
221 			continue;
222 
223 		if (IS_ENABLED(CONFIG_SLAB) && align &&
224 			(align > s->align || s->align % align))
225 			continue;
226 
227 		return s;
228 	}
229 	return NULL;
230 }
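/*
 * Note the "close fit" requirement above: a request that, after alignment,
 * is more than sizeof(void *) smaller than an existing cache's object size
 * is not merged into it. For example (illustrative, 64-bit build without
 * SLAB_HWCACHE_ALIGN), a 56-byte request will not reuse an existing
 * 64-byte cache, while a 64-byte request with compatible flags and
 * alignment can.
 */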
231 
232 static struct kmem_cache *create_cache(const char *name,
233 		unsigned int object_size, unsigned int align,
234 		slab_flags_t flags, unsigned int useroffset,
235 		unsigned int usersize, void (*ctor)(void *),
236 		struct kmem_cache *root_cache)
237 {
238 	struct kmem_cache *s;
239 	int err;
240 
241 	if (WARN_ON(useroffset + usersize > object_size))
242 		useroffset = usersize = 0;
243 
244 	err = -ENOMEM;
245 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
246 	if (!s)
247 		goto out;
248 
249 	s->name = name;
250 	s->size = s->object_size = object_size;
251 	s->align = align;
252 	s->ctor = ctor;
253 	s->useroffset = useroffset;
254 	s->usersize = usersize;
255 
256 	err = __kmem_cache_create(s, flags);
257 	if (err)
258 		goto out_free_cache;
259 
260 	s->refcount = 1;
261 	list_add(&s->list, &slab_caches);
262 out:
263 	if (err)
264 		return ERR_PTR(err);
265 	return s;
266 
267 out_free_cache:
268 	kmem_cache_free(kmem_cache, s);
269 	goto out;
270 }
271 
272 /**
273  * kmem_cache_create_usercopy - Create a cache with a region suitable
274  * for copying to userspace
275  * @name: A string which is used in /proc/slabinfo to identify this cache.
276  * @size: The size of objects to be created in this cache.
277  * @align: The required alignment for the objects.
278  * @flags: SLAB flags
279  * @useroffset: Usercopy region offset
280  * @usersize: Usercopy region size
281  * @ctor: A constructor for the objects.
282  *
283  * Cannot be called within an interrupt, but can be interrupted.
284  * The @ctor is run when new pages are allocated by the cache.
285  *
286  * The flags are
287  *
288  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
289  * to catch references to uninitialised memory.
290  *
291  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
292  * for buffer overruns.
293  *
294  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
295  * cacheline.  This can be beneficial if you're counting cycles as closely
296  * as davem.
297  *
298  * Return: a pointer to the cache on success, NULL on failure.
299  */
300 struct kmem_cache *
301 kmem_cache_create_usercopy(const char *name,
302 		  unsigned int size, unsigned int align,
303 		  slab_flags_t flags,
304 		  unsigned int useroffset, unsigned int usersize,
305 		  void (*ctor)(void *))
306 {
307 	struct kmem_cache *s = NULL;
308 	const char *cache_name;
309 	int err;
310 
311 #ifdef CONFIG_SLUB_DEBUG
312 	/*
313 	 * If no slub_debug was enabled globally, the static key is not yet
314 	 * enabled by setup_slub_debug(). Enable it if the cache is being
315 	 * created with any of the debugging flags passed explicitly.
316 	 */
317 	if (flags & SLAB_DEBUG_FLAGS)
318 		static_branch_enable(&slub_debug_enabled);
319 #endif
320 
321 	mutex_lock(&slab_mutex);
322 
323 	err = kmem_cache_sanity_check(name, size);
324 	if (err) {
325 		goto out_unlock;
326 	}
327 
328 	/* Refuse requests with allocator specific flags */
329 	if (flags & ~SLAB_FLAGS_PERMITTED) {
330 		err = -EINVAL;
331 		goto out_unlock;
332 	}
333 
334 	/*
335 	 * Some allocators will constrain the set of valid flags to a subset
336 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
337 	 * case, and we'll just provide them with a sanitized version of the
338 	 * passed flags.
339 	 */
340 	flags &= CACHE_CREATE_MASK;
341 
342 	/* Fail closed on bad usersize or useroffset values. */
343 	if (WARN_ON(!usersize && useroffset) ||
344 	    WARN_ON(size < usersize || size - usersize < useroffset))
345 		usersize = useroffset = 0;
346 
347 	if (!usersize)
348 		s = __kmem_cache_alias(name, size, align, flags, ctor);
349 	if (s)
350 		goto out_unlock;
351 
352 	cache_name = kstrdup_const(name, GFP_KERNEL);
353 	if (!cache_name) {
354 		err = -ENOMEM;
355 		goto out_unlock;
356 	}
357 
358 	s = create_cache(cache_name, size,
359 			 calculate_alignment(flags, align, size),
360 			 flags, useroffset, usersize, ctor, NULL);
361 	if (IS_ERR(s)) {
362 		err = PTR_ERR(s);
363 		kfree_const(cache_name);
364 	}
365 
366 out_unlock:
367 	mutex_unlock(&slab_mutex);
368 
369 	if (err) {
370 		if (flags & SLAB_PANIC)
371 			panic("%s: Failed to create slab '%s'. Error %d\n",
372 				__func__, name, err);
373 		else {
374 			pr_warn("%s(%s) failed with error %d\n",
375 				__func__, name, err);
376 			dump_stack();
377 		}
378 		return NULL;
379 	}
380 	return s;
381 }
382 EXPORT_SYMBOL(kmem_cache_create_usercopy);
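/*
 * Illustrative usage sketch (the struct and variable names below are made
 * up for this example): a cache whose objects expose only the "data" field
 * to copy_to_user()/copy_from_user() through the usercopy window.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *			0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, data),
 *			sizeof_field(struct foo, data), NULL);
 *
 * Hardened usercopy then rejects copies that stray outside the declared
 * region.
 */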
383 
384 /**
385  * kmem_cache_create - Create a cache.
386  * @name: A string which is used in /proc/slabinfo to identify this cache.
387  * @size: The size of objects to be created in this cache.
388  * @align: The required alignment for the objects.
389  * @flags: SLAB flags
390  * @ctor: A constructor for the objects.
391  *
392  * Cannot be called within an interrupt, but can be interrupted.
393  * The @ctor is run when new pages are allocated by the cache.
394  *
395  * The flags are
396  *
397  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
398  * to catch references to uninitialised memory.
399  *
400  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
401  * for buffer overruns.
402  *
403  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
404  * cacheline.  This can be beneficial if you're counting cycles as closely
405  * as davem.
406  *
407  * Return: a pointer to the cache on success, NULL on failure.
408  */
409 struct kmem_cache *
410 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
411 		slab_flags_t flags, void (*ctor)(void *))
412 {
413 	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
414 					  ctor);
415 }
416 EXPORT_SYMBOL(kmem_cache_create);
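/*
 * Illustrative usage sketch with a constructor (the names are made up):
 * the constructor runs when a new slab page is populated with objects,
 * not on every allocation.
 *
 *	struct bar {
 *		spinlock_t lock;
 *		...
 *	};
 *
 *	static void bar_ctor(void *obj)
 *	{
 *		struct bar *b = obj;
 *
 *		spin_lock_init(&b->lock);
 *	}
 *
 *	bar_cachep = kmem_cache_create("bar", sizeof(struct bar), 0,
 *				       SLAB_HWCACHE_ALIGN, bar_ctor);
 */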
417 
418 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
419 {
420 	LIST_HEAD(to_destroy);
421 	struct kmem_cache *s, *s2;
422 
423 	/*
424 	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
425 	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
426  * through RCU and the associated kmem_cache is dereferenced
427 	 * while freeing the pages, so the kmem_caches should be freed only
428 	 * after the pending RCU operations are finished.  As rcu_barrier()
429 	 * is a pretty slow operation, we batch all pending destructions
430 	 * asynchronously.
431 	 */
432 	mutex_lock(&slab_mutex);
433 	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
434 	mutex_unlock(&slab_mutex);
435 
436 	if (list_empty(&to_destroy))
437 		return;
438 
439 	rcu_barrier();
440 
441 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
442 		debugfs_slab_release(s);
443 		kfence_shutdown_cache(s);
444 #ifdef SLAB_SUPPORTS_SYSFS
445 		sysfs_slab_release(s);
446 #else
447 		slab_kmem_cache_release(s);
448 #endif
449 	}
450 }
451 
452 static int shutdown_cache(struct kmem_cache *s)
453 {
454 	/* free asan quarantined objects */
455 	kasan_cache_shutdown(s);
456 
457 	if (__kmem_cache_shutdown(s) != 0)
458 		return -EBUSY;
459 
460 	list_del(&s->list);
461 
462 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
463 #ifdef SLAB_SUPPORTS_SYSFS
464 		sysfs_slab_unlink(s);
465 #endif
466 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
467 		schedule_work(&slab_caches_to_rcu_destroy_work);
468 	} else {
469 		kfence_shutdown_cache(s);
470 		debugfs_slab_release(s);
471 #ifdef SLAB_SUPPORTS_SYSFS
472 		sysfs_slab_unlink(s);
473 		sysfs_slab_release(s);
474 #else
475 		slab_kmem_cache_release(s);
476 #endif
477 	}
478 
479 	return 0;
480 }
481 
482 void slab_kmem_cache_release(struct kmem_cache *s)
483 {
484 	__kmem_cache_release(s);
485 	kfree_const(s->name);
486 	kmem_cache_free(kmem_cache, s);
487 }
488 
489 void kmem_cache_destroy(struct kmem_cache *s)
490 {
491 	if (unlikely(!s) || !kasan_check_byte(s))
492 		return;
493 
494 	cpus_read_lock();
495 	mutex_lock(&slab_mutex);
496 
497 	s->refcount--;
498 	if (s->refcount)
499 		goto out_unlock;
500 
501 	WARN(shutdown_cache(s),
502 	     "%s %s: Slab cache still has objects when called from %pS",
503 	     __func__, s->name, (void *)_RET_IP_);
504 out_unlock:
505 	mutex_unlock(&slab_mutex);
506 	cpus_read_unlock();
507 }
508 EXPORT_SYMBOL(kmem_cache_destroy);
509 
510 /**
511  * kmem_cache_shrink - Shrink a cache.
512  * @cachep: The cache to shrink.
513  *
514  * Releases as many slabs as possible for a cache.
515  * To help debugging, a zero exit status indicates all slabs were released.
516  *
517  * Return: %0 if all slabs were released, non-zero otherwise
518  */
519 int kmem_cache_shrink(struct kmem_cache *cachep)
520 {
521 	int ret;
522 
523 
524 	kasan_cache_shrink(cachep);
525 	ret = __kmem_cache_shrink(cachep);
526 
527 	return ret;
528 }
529 EXPORT_SYMBOL(kmem_cache_shrink);
530 
531 bool slab_is_available(void)
532 {
533 	return slab_state >= UP;
534 }
535 
536 #ifdef CONFIG_PRINTK
537 /**
538  * kmem_valid_obj - does the pointer reference a valid slab object?
539  * @object: pointer to query.
540  *
541  * Return: %true if the pointer is to a not-yet-freed object from
542  * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
543  * is to an already-freed object, and %false otherwise.
544  */
545 bool kmem_valid_obj(void *object)
546 {
547 	struct folio *folio;
548 
549 	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
550 	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
551 		return false;
552 	folio = virt_to_folio(object);
553 	return folio_test_slab(folio);
554 }
555 EXPORT_SYMBOL_GPL(kmem_valid_obj);
556 
557 static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
558 {
559 	if (__kfence_obj_info(kpp, object, slab))
560 		return;
561 	__kmem_obj_info(kpp, object, slab);
562 }
563 
564 /**
565  * kmem_dump_obj - Print available slab provenance information
566  * @object: slab object for which to find provenance information.
567  *
568  * This function uses pr_cont(), so the caller is expected to have
569  * printed out whatever preamble is appropriate.  The provenance information
570  * depends on the type of object and on how much debugging is enabled.
571  * For a slab-cache object, the fact that it is a slab object is printed,
572  * and, if available, the slab name, return address, and stack trace from
573  * the allocation and last free path of that object.
574  *
575  * This function will splat if passed a pointer to a non-slab object.
576  * If you are not sure what type of object you have, you should instead
577  * use mem_dump_obj().
578  */
579 void kmem_dump_obj(void *object)
580 {
581 	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
582 	int i;
583 	struct slab *slab;
584 	unsigned long ptroffset;
585 	struct kmem_obj_info kp = { };
586 
587 	if (WARN_ON_ONCE(!virt_addr_valid(object)))
588 		return;
589 	slab = virt_to_slab(object);
590 	if (WARN_ON_ONCE(!slab)) {
591 		pr_cont(" non-slab memory.\n");
592 		return;
593 	}
594 	kmem_obj_info(&kp, object, slab);
595 	if (kp.kp_slab_cache)
596 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
597 	else
598 		pr_cont(" slab%s", cp);
599 	if (is_kfence_address(object))
600 		pr_cont(" (kfence)");
601 	if (kp.kp_objp)
602 		pr_cont(" start %px", kp.kp_objp);
603 	if (kp.kp_data_offset)
604 		pr_cont(" data offset %lu", kp.kp_data_offset);
605 	if (kp.kp_objp) {
606 		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
607 		pr_cont(" pointer offset %lu", ptroffset);
608 	}
609 	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
610 		pr_cont(" size %u", kp.kp_slab_cache->usersize);
611 	if (kp.kp_ret)
612 		pr_cont(" allocated at %pS\n", kp.kp_ret);
613 	else
614 		pr_cont("\n");
615 	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
616 		if (!kp.kp_stack[i])
617 			break;
618 		pr_info("    %pS\n", kp.kp_stack[i]);
619 	}
620 
621 	if (kp.kp_free_stack[0])
622 		pr_cont(" Free path:\n");
623 
624 	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
625 		if (!kp.kp_free_stack[i])
626 			break;
627 		pr_info("    %pS\n", kp.kp_free_stack[i]);
628 	}
629 
630 }
631 EXPORT_SYMBOL_GPL(kmem_dump_obj);
632 #endif
633 
634 #ifndef CONFIG_SLOB
635 /* Create a cache during boot when no slab services are available yet */
636 void __init create_boot_cache(struct kmem_cache *s, const char *name,
637 		unsigned int size, slab_flags_t flags,
638 		unsigned int useroffset, unsigned int usersize)
639 {
640 	int err;
641 	unsigned int align = ARCH_KMALLOC_MINALIGN;
642 
643 	s->name = name;
644 	s->size = s->object_size = size;
645 
646 	/*
647 	 * For power of two sizes, guarantee natural alignment for kmalloc
648 	 * caches, regardless of SL*B debugging options.
649 	 */
650 	if (is_power_of_2(size))
651 		align = max(align, size);
652 	s->align = calculate_alignment(flags, align, size);
653 
654 	s->useroffset = useroffset;
655 	s->usersize = usersize;
656 
657 	err = __kmem_cache_create(s, flags);
658 
659 	if (err)
660 		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
661 					name, size, err);
662 
663 	s->refcount = -1;	/* Exempt from merging for now */
664 }
665 
666 struct kmem_cache *__init create_kmalloc_cache(const char *name,
667 		unsigned int size, slab_flags_t flags,
668 		unsigned int useroffset, unsigned int usersize)
669 {
670 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
671 
672 	if (!s)
673 		panic("Out of memory when creating slab %s\n", name);
674 
675 	create_boot_cache(s, name, size, flags, useroffset, usersize);
676 	kasan_cache_create_kmalloc(s);
677 	list_add(&s->list, &slab_caches);
678 	s->refcount = 1;
679 	return s;
680 }
681 
682 struct kmem_cache *
683 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
684 { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
685 EXPORT_SYMBOL(kmalloc_caches);
686 
687 /*
688  * Conversion table for small slab sizes / 8 to the index in the
689  * kmalloc array. This is necessary for slabs < 192 since we have
690  * non-power-of-two cache sizes there. The size of larger slabs can be
691  * determined using fls.
692  */
693 static u8 size_index[24] __ro_after_init = {
694 	3,	/* 8 */
695 	4,	/* 16 */
696 	5,	/* 24 */
697 	5,	/* 32 */
698 	6,	/* 40 */
699 	6,	/* 48 */
700 	6,	/* 56 */
701 	6,	/* 64 */
702 	1,	/* 72 */
703 	1,	/* 80 */
704 	1,	/* 88 */
705 	1,	/* 96 */
706 	7,	/* 104 */
707 	7,	/* 112 */
708 	7,	/* 120 */
709 	7,	/* 128 */
710 	2,	/* 136 */
711 	2,	/* 144 */
712 	2,	/* 152 */
713 	2,	/* 160 */
714 	2,	/* 168 */
715 	2,	/* 176 */
716 	2,	/* 184 */
717 	2	/* 192 */
718 };
719 
720 static inline unsigned int size_index_elem(unsigned int bytes)
721 {
722 	return (bytes - 1) / 8;
723 }
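/*
 * Worked example (illustrative, with the default table above): a request
 * for 40 bytes gives size_index_elem(40) == (40 - 1) / 8 == 4, and
 * size_index[4] == 6, i.e. the request is served from the 64-byte kmalloc
 * cache (2^6). A 72-byte request maps to index 1, the special 96-byte
 * cache.
 */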
724 
725 /*
726  * Find the kmem_cache structure that serves a given size of
727  * allocation
728  */
729 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
730 {
731 	unsigned int index;
732 
733 	if (size <= 192) {
734 		if (!size)
735 			return ZERO_SIZE_PTR;
736 
737 		index = size_index[size_index_elem(size)];
738 	} else {
739 		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
740 			return NULL;
741 		index = fls(size - 1);
742 	}
743 
744 	return kmalloc_caches[kmalloc_type(flags)][index];
745 }
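/*
 * For sizes above 192 the index is simply fls(size - 1): e.g. a 300-byte
 * request gives fls(299) == 9, i.e. the kmalloc-512 cache (illustrative).
 */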
746 
747 #ifdef CONFIG_ZONE_DMA
748 #define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
749 #else
750 #define KMALLOC_DMA_NAME(sz)
751 #endif
752 
753 #ifdef CONFIG_MEMCG_KMEM
754 #define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
755 #else
756 #define KMALLOC_CGROUP_NAME(sz)
757 #endif
758 
759 #define INIT_KMALLOC_INFO(__size, __short_size)			\
760 {								\
761 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
762 	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
763 	KMALLOC_CGROUP_NAME(__short_size)			\
764 	KMALLOC_DMA_NAME(__short_size)				\
765 	.size = __size,						\
766 }
767 
768 /*
769  * kmalloc_info[] is needed to make the slub_debug=,kmalloc-xx option work at boot time.
770  * kmalloc_index() supports up to 2^25=32MB, so the final entry of the table is
771  * kmalloc-32M.
772  */
773 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
774 	INIT_KMALLOC_INFO(0, 0),
775 	INIT_KMALLOC_INFO(96, 96),
776 	INIT_KMALLOC_INFO(192, 192),
777 	INIT_KMALLOC_INFO(8, 8),
778 	INIT_KMALLOC_INFO(16, 16),
779 	INIT_KMALLOC_INFO(32, 32),
780 	INIT_KMALLOC_INFO(64, 64),
781 	INIT_KMALLOC_INFO(128, 128),
782 	INIT_KMALLOC_INFO(256, 256),
783 	INIT_KMALLOC_INFO(512, 512),
784 	INIT_KMALLOC_INFO(1024, 1k),
785 	INIT_KMALLOC_INFO(2048, 2k),
786 	INIT_KMALLOC_INFO(4096, 4k),
787 	INIT_KMALLOC_INFO(8192, 8k),
788 	INIT_KMALLOC_INFO(16384, 16k),
789 	INIT_KMALLOC_INFO(32768, 32k),
790 	INIT_KMALLOC_INFO(65536, 64k),
791 	INIT_KMALLOC_INFO(131072, 128k),
792 	INIT_KMALLOC_INFO(262144, 256k),
793 	INIT_KMALLOC_INFO(524288, 512k),
794 	INIT_KMALLOC_INFO(1048576, 1M),
795 	INIT_KMALLOC_INFO(2097152, 2M),
796 	INIT_KMALLOC_INFO(4194304, 4M),
797 	INIT_KMALLOC_INFO(8388608, 8M),
798 	INIT_KMALLOC_INFO(16777216, 16M),
799 	INIT_KMALLOC_INFO(33554432, 32M)
800 };
801 
802 /*
803  * Patch up the size_index table if we have strange large alignment
804  * requirements for the kmalloc array. This is only the case for
805  * MIPS it seems. The standard arches will not generate any code here.
806  *
807  * Largest permitted alignment is 256 bytes due to the way we
808  * handle the index determination for the smaller caches.
809  *
810  * Make sure that nothing crazy happens if someone starts tinkering
811  * around with ARCH_KMALLOC_MINALIGN.
812  */
813 void __init setup_kmalloc_cache_index_table(void)
814 {
815 	unsigned int i;
816 
817 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
818 		!is_power_of_2(KMALLOC_MIN_SIZE));
819 
820 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
821 		unsigned int elem = size_index_elem(i);
822 
823 		if (elem >= ARRAY_SIZE(size_index))
824 			break;
825 		size_index[elem] = KMALLOC_SHIFT_LOW;
826 	}
827 
828 	if (KMALLOC_MIN_SIZE >= 64) {
829 		/*
830 		 * The 96 byte sized cache is not used if the alignment
831 		 * is 64 bytes.
832 		 */
833 		for (i = 64 + 8; i <= 96; i += 8)
834 			size_index[size_index_elem(i)] = 7;
835 
836 	}
837 
838 	if (KMALLOC_MIN_SIZE >= 128) {
839 		/*
840 		 * The 192 byte sized cache is not used if the alignment
841 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
842 		 * instead.
843 		 */
844 		for (i = 128 + 8; i <= 192; i += 8)
845 			size_index[size_index_elem(i)] = 8;
846 	}
847 }
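/*
 * For instance (illustrative), on an architecture where KMALLOC_MIN_SIZE
 * is 64, the loop above redirects the 72..96 byte slots to index 7, so
 * such requests fall back to the 128-byte cache instead of the unused
 * 96-byte one.
 */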
848 
849 static void __init
850 new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
851 {
852 	if (type == KMALLOC_RECLAIM) {
853 		flags |= SLAB_RECLAIM_ACCOUNT;
854 	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
855 		if (mem_cgroup_kmem_disabled()) {
856 			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
857 			return;
858 		}
859 		flags |= SLAB_ACCOUNT;
860 	}
861 
862 	kmalloc_caches[type][idx] = create_kmalloc_cache(
863 					kmalloc_info[idx].name[type],
864 					kmalloc_info[idx].size, flags, 0,
865 					kmalloc_info[idx].size);
866 
867 	/*
868 	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
869 	 * KMALLOC_NORMAL caches.
870 	 */
871 	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
872 		kmalloc_caches[type][idx]->refcount = -1;
873 }
874 
875 /*
876  * Create the kmalloc array. Some of the regular kmalloc arrays
877  * may already have been created because they were needed to
878  * enable allocations for slab creation.
879  */
880 void __init create_kmalloc_caches(slab_flags_t flags)
881 {
882 	int i;
883 	enum kmalloc_cache_type type;
884 
885 	/*
886 	 * Include the KMALLOC_CGROUP caches when CONFIG_MEMCG_KMEM is defined.
887 	 */
888 	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
889 		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
890 			if (!kmalloc_caches[type][i])
891 				new_kmalloc_cache(i, type, flags);
892 
893 			/*
894 			 * Caches that are not a power of two in size. These
895 			 * have to be created immediately after the earlier
896 			 * power-of-two caches.
897 			 */
898 			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
899 					!kmalloc_caches[type][1])
900 				new_kmalloc_cache(1, type, flags);
901 			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
902 					!kmalloc_caches[type][2])
903 				new_kmalloc_cache(2, type, flags);
904 		}
905 	}
906 
907 	/* Kmalloc array is now usable */
908 	slab_state = UP;
909 
910 #ifdef CONFIG_ZONE_DMA
911 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
912 		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
913 
914 		if (s) {
915 			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
916 				kmalloc_info[i].name[KMALLOC_DMA],
917 				kmalloc_info[i].size,
918 				SLAB_CACHE_DMA | flags, 0,
919 				kmalloc_info[i].size);
920 		}
921 	}
922 #endif
923 }
924 #endif /* !CONFIG_SLOB */
925 
926 gfp_t kmalloc_fix_flags(gfp_t flags)
927 {
928 	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
929 
930 	flags &= ~GFP_SLAB_BUG_MASK;
931 	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
932 			invalid_mask, &invalid_mask, flags, &flags);
933 	dump_stack();
934 
935 	return flags;
936 }
937 
938 /*
939  * To avoid unnecessary overhead, we pass through large allocation requests
940  * directly to the page allocator. We use __GFP_COMP, because we will need to
941  * know the allocation order to free the pages properly in kfree.
942  */
943 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
944 {
945 	void *ret = NULL;
946 	struct page *page;
947 
948 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
949 		flags = kmalloc_fix_flags(flags);
950 
951 	flags |= __GFP_COMP;
952 	page = alloc_pages(flags, order);
953 	if (likely(page)) {
954 		ret = page_address(page);
955 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
956 				      PAGE_SIZE << order);
957 	}
958 	ret = kasan_kmalloc_large(ret, size, flags);
959 	/* As ret might get tagged, call kmemleak hook after KASAN. */
960 	kmemleak_alloc(ret, size, 1, flags);
961 	return ret;
962 }
963 EXPORT_SYMBOL(kmalloc_order);
964 
965 #ifdef CONFIG_TRACING
966 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
967 {
968 	void *ret = kmalloc_order(size, flags, order);
969 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
970 	return ret;
971 }
972 EXPORT_SYMBOL(kmalloc_order_trace);
973 #endif
974 
975 #ifdef CONFIG_SLAB_FREELIST_RANDOM
976 /* Randomize a generic freelist */
977 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
978 			       unsigned int count)
979 {
980 	unsigned int rand;
981 	unsigned int i;
982 
983 	for (i = 0; i < count; i++)
984 		list[i] = i;
985 
986 	/* Fisher-Yates shuffle */
987 	for (i = count - 1; i > 0; i--) {
988 		rand = prandom_u32_state(state);
989 		rand %= (i + 1);
990 		swap(list[i], list[rand]);
991 	}
992 }
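/*
 * Sketch of the shuffle above on a 4-entry list (illustrative): starting
 * from {0, 1, 2, 3}, each step swaps list[i] with a randomly chosen slot
 * at or below i, e.g. i=3 swapping with slot 1 gives {0, 3, 2, 1}, then
 * i=2 with slot 0 gives {2, 3, 0, 1}, and so on, producing one of the 4!
 * possible permutations.
 */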
993 
994 /* Create a random sequence per cache */
995 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
996 				    gfp_t gfp)
997 {
998 	struct rnd_state state;
999 
1000 	if (count < 2 || cachep->random_seq)
1001 		return 0;
1002 
1003 	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1004 	if (!cachep->random_seq)
1005 		return -ENOMEM;
1006 
1007 	/* Get best entropy at this stage of boot */
1008 	prandom_seed_state(&state, get_random_long());
1009 
1010 	freelist_randomize(&state, cachep->random_seq, count);
1011 	return 0;
1012 }
1013 
1014 /* Destroy the per-cache random freelist sequence */
1015 void cache_random_seq_destroy(struct kmem_cache *cachep)
1016 {
1017 	kfree(cachep->random_seq);
1018 	cachep->random_seq = NULL;
1019 }
1020 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1021 
1022 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
1023 #ifdef CONFIG_SLAB
1024 #define SLABINFO_RIGHTS (0600)
1025 #else
1026 #define SLABINFO_RIGHTS (0400)
1027 #endif
1028 
1029 static void print_slabinfo_header(struct seq_file *m)
1030 {
1031 	/*
1032 	 * Output format version, so at least we can change it
1033 	 * without _too_ many complaints.
1034 	 */
1035 #ifdef CONFIG_DEBUG_SLAB
1036 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1037 #else
1038 	seq_puts(m, "slabinfo - version: 2.1\n");
1039 #endif
1040 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1041 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1042 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1043 #ifdef CONFIG_DEBUG_SLAB
1044 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1045 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1046 #endif
1047 	seq_putc(m, '\n');
1048 }
1049 
1050 static void *slab_start(struct seq_file *m, loff_t *pos)
1051 {
1052 	mutex_lock(&slab_mutex);
1053 	return seq_list_start(&slab_caches, *pos);
1054 }
1055 
1056 static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1057 {
1058 	return seq_list_next(p, &slab_caches, pos);
1059 }
1060 
1061 static void slab_stop(struct seq_file *m, void *p)
1062 {
1063 	mutex_unlock(&slab_mutex);
1064 }
1065 
1066 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1067 {
1068 	struct slabinfo sinfo;
1069 
1070 	memset(&sinfo, 0, sizeof(sinfo));
1071 	get_slabinfo(s, &sinfo);
1072 
1073 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1074 		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
1075 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
1076 
1077 	seq_printf(m, " : tunables %4u %4u %4u",
1078 		   sinfo.limit, sinfo.batchcount, sinfo.shared);
1079 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
1080 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1081 	slabinfo_show_stats(m, s);
1082 	seq_putc(m, '\n');
1083 }
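/*
 * With the header above, a cache_show() line looks roughly like this
 * (illustrative values only, tunables are unused on SLUB):
 *
 *   dentry 123456 147000    192   21    1 : tunables 0 0 0 \
 *	: slabdata   7000   7000      0
 */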
1084 
1085 static int slab_show(struct seq_file *m, void *p)
1086 {
1087 	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1088 
1089 	if (p == slab_caches.next)
1090 		print_slabinfo_header(m);
1091 	cache_show(s, m);
1092 	return 0;
1093 }
1094 
1095 void dump_unreclaimable_slab(void)
1096 {
1097 	struct kmem_cache *s;
1098 	struct slabinfo sinfo;
1099 
1100 	/*
1101 	 * Here acquiring slab_mutex is risky since we don't want to
1102 	 * sleep in the OOM path. But without holding the mutex we would
1103 	 * risk a crash while traversing the list.
1104 	 * Use mutex_trylock to protect the list traversal, and dump
1105 	 * nothing if the mutex cannot be acquired.
1106 	 */
1107 	if (!mutex_trylock(&slab_mutex)) {
1108 		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1109 		return;
1110 	}
1111 
1112 	pr_info("Unreclaimable slab info:\n");
1113 	pr_info("Name                      Used          Total\n");
1114 
1115 	list_for_each_entry(s, &slab_caches, list) {
1116 		if (s->flags & SLAB_RECLAIM_ACCOUNT)
1117 			continue;
1118 
1119 		get_slabinfo(s, &sinfo);
1120 
1121 		if (sinfo.num_objs > 0)
1122 			pr_info("%-17s %10luKB %10luKB\n", s->name,
1123 				(sinfo.active_objs * s->size) / 1024,
1124 				(sinfo.num_objs * s->size) / 1024);
1125 	}
1126 	mutex_unlock(&slab_mutex);
1127 }
1128 
1129 /*
1130  * slabinfo_op - iterator that generates /proc/slabinfo
1131  *
1132  * Output layout:
1133  * cache-name
1134  * num-active-objs
1135  * total-objs
1136  * object size
1137  * num-active-slabs
1138  * total-slabs
1139  * num-pages-per-slab
1140  * + further values on SMP and with statistics enabled
1141  */
1142 static const struct seq_operations slabinfo_op = {
1143 	.start = slab_start,
1144 	.next = slab_next,
1145 	.stop = slab_stop,
1146 	.show = slab_show,
1147 };
1148 
1149 static int slabinfo_open(struct inode *inode, struct file *file)
1150 {
1151 	return seq_open(file, &slabinfo_op);
1152 }
1153 
1154 static const struct proc_ops slabinfo_proc_ops = {
1155 	.proc_flags	= PROC_ENTRY_PERMANENT,
1156 	.proc_open	= slabinfo_open,
1157 	.proc_read	= seq_read,
1158 	.proc_write	= slabinfo_write,
1159 	.proc_lseek	= seq_lseek,
1160 	.proc_release	= seq_release,
1161 };
1162 
1163 static int __init slab_proc_init(void)
1164 {
1165 	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1166 	return 0;
1167 }
1168 module_init(slab_proc_init);
1169 
1170 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
1171 
1172 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1173 					   gfp_t flags)
1174 {
1175 	void *ret;
1176 	size_t ks;
1177 
1178 	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
1179 	if (likely(!ZERO_OR_NULL_PTR(p))) {
1180 		if (!kasan_check_byte(p))
1181 			return NULL;
1182 		ks = kfence_ksize(p) ?: __ksize(p);
1183 	} else
1184 		ks = 0;
1185 
1186 	/* If the object still fits, repoison it precisely. */
1187 	if (ks >= new_size) {
1188 		p = kasan_krealloc((void *)p, new_size, flags);
1189 		return (void *)p;
1190 	}
1191 
1192 	ret = kmalloc_track_caller(new_size, flags);
1193 	if (ret && p) {
1194 		/* Disable KASAN checks as the object's redzone is accessed. */
1195 		kasan_disable_current();
1196 		memcpy(ret, kasan_reset_tag(p), ks);
1197 		kasan_enable_current();
1198 	}
1199 
1200 	return ret;
1201 }
1202 
1203 /**
1204  * krealloc - reallocate memory. The contents will remain unchanged.
1205  * @p: object to reallocate memory for.
1206  * @new_size: how many bytes of memory are required.
1207  * @flags: the type of memory to allocate.
1208  *
1209  * The contents of the object pointed to are preserved up to the
1210  * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
1211  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
1212  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
1213  *
1214  * Return: pointer to the allocated memory or %NULL in case of error
1215  */
1216 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1217 {
1218 	void *ret;
1219 
1220 	if (unlikely(!new_size)) {
1221 		kfree(p);
1222 		return ZERO_SIZE_PTR;
1223 	}
1224 
1225 	ret = __do_krealloc(p, new_size, flags);
1226 	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1227 		kfree(p);
1228 
1229 	return ret;
1230 }
1231 EXPORT_SYMBOL(krealloc);
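/*
 * Illustrative usage sketch ("buf", "new_buf" and "new_len" are made-up
 * names): on failure the original buffer is left untouched, so the usual
 * pattern assigns through a temporary.
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf)
 *		return -ENOMEM;		/* buf is still valid here *\/
 *	buf = new_buf;
 */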
1232 
1233 /**
1234  * kfree_sensitive - Clear sensitive information in memory before freeing
1235  * @p: object to free memory of
1236  *
1237  * The memory of the object @p points to is zeroed before it is freed.
1238  * If @p is %NULL, kfree_sensitive() does nothing.
1239  *
1240  * Note: this function zeroes the whole allocated buffer which can be a good
1241  * deal bigger than the requested buffer size passed to kmalloc(). So be
1242  * careful when using this function in performance sensitive code.
1243  */
1244 void kfree_sensitive(const void *p)
1245 {
1246 	size_t ks;
1247 	void *mem = (void *)p;
1248 
1249 	ks = ksize(mem);
1250 	if (ks)
1251 		memzero_explicit(mem, ks);
1252 	kfree(mem);
1253 }
1254 EXPORT_SYMBOL(kfree_sensitive);
1255 
1256 /**
1257  * ksize - get the actual amount of memory allocated for a given object
1258  * @objp: Pointer to the object
1259  *
1260  * kmalloc may internally round up allocations and return more memory
1261  * than requested. ksize() can be used to determine the actual amount of
1262  * memory allocated. The caller may use this additional memory, even though
1263  * a smaller amount of memory was initially specified with the kmalloc call.
1264  * The caller must guarantee that objp points to a valid object previously
1265  * allocated with either kmalloc() or kmem_cache_alloc(). The object
1266  * must not be freed during the duration of the call.
1267  *
1268  * Return: size of the actual memory used by @objp in bytes
1269  */
1270 size_t ksize(const void *objp)
1271 {
1272 	size_t size;
1273 
1274 	/*
1275 	 * We need to first check that the pointer to the object is valid, and
1276 	 * only then unpoison the memory. The report printed from ksize() is
1277 	 * more useful, then when it's printed later when the behaviour could
1278 	 * more useful than when it's printed later, when the behaviour could
1279 	 *
1280 	 * We use kasan_check_byte(), which is supported for the hardware
1281 	 * tag-based KASAN mode, unlike kasan_check_read/write().
1282 	 *
1283 	 * If the pointed to memory is invalid, we return 0 to avoid users of
1284 	 * ksize() writing to and potentially corrupting the memory region.
1285 	 *
1286 	 * We want to perform the check before __ksize(), to avoid potentially
1287 	 * crashing in __ksize() due to accessing invalid metadata.
1288 	 */
1289 	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
1290 		return 0;
1291 
1292 	size = kfence_ksize(objp) ?: __ksize(objp);
1293 	/*
1294 	 * We assume that ksize callers could use whole allocated area,
1295 	 * so we need to unpoison this area.
1296 	 */
1297 	kasan_unpoison_range(objp, size);
1298 	return size;
1299 }
1300 EXPORT_SYMBOL(ksize);
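/*
 * For example (illustrative, with the default kmalloc caches and no
 * debugging or KASAN overhead shrinking the usable size): kmalloc(17,
 * GFP_KERNEL) is served from the 32-byte cache, so ksize() on the
 * returned pointer reports 32 and the caller may legitimately use all
 * 32 bytes.
 */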
1301 
1302 /* Tracepoints definitions. */
1303 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1304 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1305 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1306 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1307 EXPORT_TRACEPOINT_SYMBOL(kfree);
1308 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1309 
1310 int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1311 {
1312 	if (__should_failslab(s, gfpflags))
1313 		return -ENOMEM;
1314 	return 0;
1315 }
1316 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
1317