xref: /openbmc/linux/mm/slab_common.c (revision 609e478b)
1 /*
2  * Slab allocator functions that are independent of the allocator strategy
3  *
4  * (C) 2012 Christoph Lameter <cl@linux.com>
5  */
6 #include <linux/slab.h>
7 
8 #include <linux/mm.h>
9 #include <linux/poison.h>
10 #include <linux/interrupt.h>
11 #include <linux/memory.h>
12 #include <linux/compiler.h>
13 #include <linux/module.h>
14 #include <linux/cpu.h>
15 #include <linux/uaccess.h>
16 #include <linux/seq_file.h>
17 #include <linux/proc_fs.h>
18 #include <asm/cacheflush.h>
19 #include <asm/tlbflush.h>
20 #include <asm/page.h>
21 #include <linux/memcontrol.h>
22 
23 #define CREATE_TRACE_POINTS
24 #include <trace/events/kmem.h>
25 
26 #include "slab.h"
27 
28 enum slab_state slab_state;
29 LIST_HEAD(slab_caches);
30 DEFINE_MUTEX(slab_mutex);
31 struct kmem_cache *kmem_cache;
32 
33 /*
34  * Set of flags that will prevent slab merging
35  */
36 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
37 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 		SLAB_FAILSLAB)
39 
40 #define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
41 		SLAB_CACHE_DMA | SLAB_NOTRACK)
42 
43 /*
44  * Merge control. If this is set then no merging of slab caches will occur.
45  * (Could be removed. This was introduced to pacify the merge skeptics.)
46  */
47 static int slab_nomerge;
48 
49 static int __init setup_slab_nomerge(char *str)
50 {
51 	slab_nomerge = 1;
52 	return 1;
53 }
54 
55 #ifdef CONFIG_SLUB
56 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
57 #endif
58 
59 __setup("slab_nomerge", setup_slab_nomerge);
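/*
 * Illustrative note (a sketch of the boot-time interface, not a new option):
 * merging can be disabled from the kernel command line by booting with
 * "slab_nomerge" (or the CONFIG_SLUB-only alias "slub_nomerge" above), which
 * sets slab_nomerge and makes find_mergeable() below bail out early, so every
 * cache keeps its own entry in /proc/slabinfo.
 */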
60 
61 /*
62  * Determine the size of a slab object
63  */
64 unsigned int kmem_cache_size(struct kmem_cache *s)
65 {
66 	return s->object_size;
67 }
68 EXPORT_SYMBOL(kmem_cache_size);
69 
70 #ifdef CONFIG_DEBUG_VM
71 static int kmem_cache_sanity_check(const char *name, size_t size)
72 {
73 	struct kmem_cache *s = NULL;
74 
75 	if (!name || in_interrupt() || size < sizeof(void *) ||
76 		size > KMALLOC_MAX_SIZE) {
77 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
78 		return -EINVAL;
79 	}
80 
81 	list_for_each_entry(s, &slab_caches, list) {
82 		char tmp;
83 		int res;
84 
85 		/*
86 		 * This happens when the module gets unloaded and doesn't
87 		 * destroy its slab cache and no-one else reuses the vmalloc
88 		 * area of the module.  Print a warning.
89 		 */
90 		res = probe_kernel_address(s->name, tmp);
91 		if (res) {
92 			pr_err("Slab cache with size %d has lost its name\n",
93 			       s->object_size);
94 			continue;
95 		}
96 
97 #if !defined(CONFIG_SLUB)
98 		if (!strcmp(s->name, name)) {
99 			pr_err("%s (%s): Cache name already exists.\n",
100 			       __func__, name);
101 			dump_stack();
102 			s = NULL;
103 			return -EINVAL;
104 		}
105 #endif
106 	}
107 
108 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
109 	return 0;
110 }
111 #else
112 static inline int kmem_cache_sanity_check(const char *name, size_t size)
113 {
114 	return 0;
115 }
116 #endif
117 
118 #ifdef CONFIG_MEMCG_KMEM
119 static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
120 		struct kmem_cache *s, struct kmem_cache *root_cache)
121 {
122 	size_t size;
123 
124 	if (!memcg_kmem_enabled())
125 		return 0;
126 
127 	if (!memcg) {
128 		size = offsetof(struct memcg_cache_params, memcg_caches);
129 		size += memcg_limited_groups_array_size * sizeof(void *);
130 	} else
131 		size = sizeof(struct memcg_cache_params);
132 
133 	s->memcg_params = kzalloc(size, GFP_KERNEL);
134 	if (!s->memcg_params)
135 		return -ENOMEM;
136 
137 	if (memcg) {
138 		s->memcg_params->memcg = memcg;
139 		s->memcg_params->root_cache = root_cache;
140 	} else
141 		s->memcg_params->is_root_cache = true;
142 
143 	return 0;
144 }
145 
146 static void memcg_free_cache_params(struct kmem_cache *s)
147 {
148 	kfree(s->memcg_params);
149 }
150 
151 static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
152 {
153 	int size;
154 	struct memcg_cache_params *new_params, *cur_params;
155 
156 	BUG_ON(!is_root_cache(s));
157 
158 	size = offsetof(struct memcg_cache_params, memcg_caches);
159 	size += num_memcgs * sizeof(void *);
160 
161 	new_params = kzalloc(size, GFP_KERNEL);
162 	if (!new_params)
163 		return -ENOMEM;
164 
165 	cur_params = s->memcg_params;
166 	memcpy(new_params->memcg_caches, cur_params->memcg_caches,
167 	       memcg_limited_groups_array_size * sizeof(void *));
168 
169 	new_params->is_root_cache = true;
170 
171 	rcu_assign_pointer(s->memcg_params, new_params);
172 	if (cur_params)
173 		kfree_rcu(cur_params, rcu_head);
174 
175 	return 0;
176 }
177 
178 int memcg_update_all_caches(int num_memcgs)
179 {
180 	struct kmem_cache *s;
181 	int ret = 0;
182 	mutex_lock(&slab_mutex);
183 
184 	list_for_each_entry(s, &slab_caches, list) {
185 		if (!is_root_cache(s))
186 			continue;
187 
188 		ret = memcg_update_cache_params(s, num_memcgs);
189 		/*
190 		 * Instead of freeing the memory, we'll just leave the caches
191 		 * up to this point in an updated state.
192 		 */
193 		if (ret)
194 			goto out;
195 	}
196 
197 	memcg_update_array_size(num_memcgs);
198 out:
199 	mutex_unlock(&slab_mutex);
200 	return ret;
201 }
202 #else
203 static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
204 		struct kmem_cache *s, struct kmem_cache *root_cache)
205 {
206 	return 0;
207 }
208 
209 static inline void memcg_free_cache_params(struct kmem_cache *s)
210 {
211 }
212 #endif /* CONFIG_MEMCG_KMEM */
213 
214 /*
215  * Find a mergeable slab cache
216  */
217 int slab_unmergeable(struct kmem_cache *s)
218 {
219 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
220 		return 1;
221 
222 	if (!is_root_cache(s))
223 		return 1;
224 
225 	if (s->ctor)
226 		return 1;
227 
228 	/*
229 	 * We may have set a slab to be unmergeable during bootstrap.
230 	 */
231 	if (s->refcount < 0)
232 		return 1;
233 
234 	return 0;
235 }
236 
237 struct kmem_cache *find_mergeable(size_t size, size_t align,
238 		unsigned long flags, const char *name, void (*ctor)(void *))
239 {
240 	struct kmem_cache *s;
241 
242 	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
243 		return NULL;
244 
245 	if (ctor)
246 		return NULL;
247 
248 	size = ALIGN(size, sizeof(void *));
249 	align = calculate_alignment(flags, align, size);
250 	size = ALIGN(size, align);
251 	flags = kmem_cache_flags(size, flags, name, NULL);
252 
253 	list_for_each_entry(s, &slab_caches, list) {
254 		if (slab_unmergeable(s))
255 			continue;
256 
257 		if (size > s->size)
258 			continue;
259 
260 		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
261 			continue;
262 		/*
263 		 * Check if alignment is compatible.
264 		 * Courtesy of Adrian Drzewiecki
265 		 */
266 		if ((s->size & ~(align - 1)) != s->size)
267 			continue;
268 
269 		if (s->size - size >= sizeof(void *))
270 			continue;
271 
272 		return s;
273 	}
274 	return NULL;
275 }
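/*
 * Illustrative example (assuming default flags and no slab debugging): a new
 * cache for 96-byte objects with no constructor normally passes every check
 * above against the existing kmalloc-96 cache - same SLAB_MERGE_SAME bits,
 * compatible alignment and less than sizeof(void *) of wasted space - so
 * __kmem_cache_alias() reuses kmalloc-96 instead of creating a new cache.
 */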
276 
277 /*
278  * Figure out what the alignment of the objects will be given a set of
279  * flags, a user specified alignment and the size of the objects.
280  */
281 unsigned long calculate_alignment(unsigned long flags,
282 		unsigned long align, unsigned long size)
283 {
284 	/*
285 	 * If the user wants hardware cache aligned objects then follow that
286 	 * suggestion if the object is sufficiently large.
287 	 *
288 	 * The hardware cache alignment cannot override the specified
289 	 * alignment, though; if the specified alignment is greater, use it.
290 	 */
291 	if (flags & SLAB_HWCACHE_ALIGN) {
292 		unsigned long ralign = cache_line_size();
293 		while (size <= ralign / 2)
294 			ralign /= 2;
295 		align = max(align, ralign);
296 	}
297 
298 	if (align < ARCH_SLAB_MINALIGN)
299 		align = ARCH_SLAB_MINALIGN;
300 
301 	return ALIGN(align, sizeof(void *));
302 }
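/*
 * Worked example (assuming a 64-byte cache line and 8-byte
 * ARCH_SLAB_MINALIGN): with SLAB_HWCACHE_ALIGN, align = 0 and size = 24 the
 * loop above halves ralign from 64 to 32 (since 24 <= 32), so 32 is returned;
 * with size = 100 the loop never runs and the full 64-byte alignment is kept.
 */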
303 
304 static struct kmem_cache *
305 do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
306 		     unsigned long flags, void (*ctor)(void *),
307 		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
308 {
309 	struct kmem_cache *s;
310 	int err;
311 
312 	err = -ENOMEM;
313 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
314 	if (!s)
315 		goto out;
316 
317 	s->name = name;
318 	s->object_size = object_size;
319 	s->size = size;
320 	s->align = align;
321 	s->ctor = ctor;
322 
323 	err = memcg_alloc_cache_params(memcg, s, root_cache);
324 	if (err)
325 		goto out_free_cache;
326 
327 	err = __kmem_cache_create(s, flags);
328 	if (err)
329 		goto out_free_cache;
330 
331 	s->refcount = 1;
332 	list_add(&s->list, &slab_caches);
333 out:
334 	if (err)
335 		return ERR_PTR(err);
336 	return s;
337 
338 out_free_cache:
339 	memcg_free_cache_params(s);
340 	kfree(s);
341 	goto out;
342 }
343 
344 /**
345  * kmem_cache_create - Create a cache.
346  * @name: A string which is used in /proc/slabinfo to identify this cache.
347  * @size: The size of objects to be created in this cache.
348  * @align: The required alignment for the objects.
349  * @flags: SLAB flags
350  * @ctor: A constructor for the objects.
351  *
352  * Returns a ptr to the cache on success, NULL on failure.
353  * Cannot be called within an interrupt, but can be interrupted.
354  * The @ctor is run when new pages are allocated by the cache.
355  *
356  * The flags are
357  *
358  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
359  * to catch references to uninitialised memory.
360  *
361  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
362  * for buffer overruns.
363  *
364  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
365  * cacheline.  This can be beneficial if you're counting cycles as closely
366  * as davem.
367  */
368 struct kmem_cache *
369 kmem_cache_create(const char *name, size_t size, size_t align,
370 		  unsigned long flags, void (*ctor)(void *))
371 {
372 	struct kmem_cache *s;
373 	char *cache_name;
374 	int err;
375 
376 	get_online_cpus();
377 	get_online_mems();
378 
379 	mutex_lock(&slab_mutex);
380 
381 	err = kmem_cache_sanity_check(name, size);
382 	if (err) {
383 		s = NULL;	/* suppress uninit var warning */
384 		goto out_unlock;
385 	}
386 
387 	/*
388 	 * Some allocators will constrain the set of valid flags to a subset
389 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
390 	 * case, and we'll just provide them with a sanitized version of the
391 	 * passed flags.
392 	 */
393 	flags &= CACHE_CREATE_MASK;
394 
395 	s = __kmem_cache_alias(name, size, align, flags, ctor);
396 	if (s)
397 		goto out_unlock;
398 
399 	cache_name = kstrdup(name, GFP_KERNEL);
400 	if (!cache_name) {
401 		err = -ENOMEM;
402 		goto out_unlock;
403 	}
404 
405 	s = do_kmem_cache_create(cache_name, size, size,
406 				 calculate_alignment(flags, align, size),
407 				 flags, ctor, NULL, NULL);
408 	if (IS_ERR(s)) {
409 		err = PTR_ERR(s);
410 		kfree(cache_name);
411 	}
412 
413 out_unlock:
414 	mutex_unlock(&slab_mutex);
415 
416 	put_online_mems();
417 	put_online_cpus();
418 
419 	if (err) {
420 		if (flags & SLAB_PANIC)
421 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
422 				name, err);
423 		else {
424 			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
425 				name, err);
426 			dump_stack();
427 		}
428 		return NULL;
429 	}
430 	return s;
431 }
432 EXPORT_SYMBOL(kmem_cache_create);
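/*
 * Usage sketch (the "foo" names are hypothetical and only illustrate the API
 * exported above):
 *
 *	struct foo { int refcnt; struct list_head link; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 *
 * Passing SLAB_PANIC in @flags would make the NULL check unnecessary, since a
 * creation failure then panics instead of returning NULL.
 */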
433 
434 #ifdef CONFIG_MEMCG_KMEM
435 /*
436  * memcg_create_kmem_cache - Create a cache for a memory cgroup.
437  * @memcg: The memory cgroup the new cache is for.
438  * @root_cache: The parent of the new cache.
439  * @memcg_name: The name of the memory cgroup (used for naming the new cache).
440  *
441  * This function attempts to create a kmem cache that will serve allocation
442  * requests going from @memcg to @root_cache. The new cache inherits properties
443  * from its parent.
444  */
445 struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
446 					   struct kmem_cache *root_cache,
447 					   const char *memcg_name)
448 {
449 	struct kmem_cache *s = NULL;
450 	char *cache_name;
451 
452 	get_online_cpus();
453 	get_online_mems();
454 
455 	mutex_lock(&slab_mutex);
456 
457 	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
458 			       memcg_cache_id(memcg), memcg_name);
459 	if (!cache_name)
460 		goto out_unlock;
461 
462 	s = do_kmem_cache_create(cache_name, root_cache->object_size,
463 				 root_cache->size, root_cache->align,
464 				 root_cache->flags, root_cache->ctor,
465 				 memcg, root_cache);
466 	if (IS_ERR(s)) {
467 		kfree(cache_name);
468 		s = NULL;
469 	}
470 
471 out_unlock:
472 	mutex_unlock(&slab_mutex);
473 
474 	put_online_mems();
475 	put_online_cpus();
476 
477 	return s;
478 }
479 
480 static int memcg_cleanup_cache_params(struct kmem_cache *s)
481 {
482 	int rc;
483 
484 	if (!s->memcg_params ||
485 	    !s->memcg_params->is_root_cache)
486 		return 0;
487 
488 	mutex_unlock(&slab_mutex);
489 	rc = __memcg_cleanup_cache_params(s);
490 	mutex_lock(&slab_mutex);
491 
492 	return rc;
493 }
494 #else
495 static int memcg_cleanup_cache_params(struct kmem_cache *s)
496 {
497 	return 0;
498 }
499 #endif /* CONFIG_MEMCG_KMEM */
500 
501 void slab_kmem_cache_release(struct kmem_cache *s)
502 {
503 	kfree(s->name);
504 	kmem_cache_free(kmem_cache, s);
505 }
506 
507 void kmem_cache_destroy(struct kmem_cache *s)
508 {
509 	get_online_cpus();
510 	get_online_mems();
511 
512 	mutex_lock(&slab_mutex);
513 
514 	s->refcount--;
515 	if (s->refcount)
516 		goto out_unlock;
517 
518 	if (memcg_cleanup_cache_params(s) != 0)
519 		goto out_unlock;
520 
521 	if (__kmem_cache_shutdown(s) != 0) {
522 		printk(KERN_ERR "kmem_cache_destroy %s: "
523 		       "Slab cache still has objects\n", s->name);
524 		dump_stack();
525 		goto out_unlock;
526 	}
527 
528 	list_del(&s->list);
529 
530 	mutex_unlock(&slab_mutex);
531 	if (s->flags & SLAB_DESTROY_BY_RCU)
532 		rcu_barrier();
533 
534 	memcg_free_cache_params(s);
535 #ifdef SLAB_SUPPORTS_SYSFS
536 	sysfs_slab_remove(s);
537 #else
538 	slab_kmem_cache_release(s);
539 #endif
540 	goto out;
541 
542 out_unlock:
543 	mutex_unlock(&slab_mutex);
544 out:
545 	put_online_mems();
546 	put_online_cpus();
547 }
548 EXPORT_SYMBOL(kmem_cache_destroy);
549 
550 /**
551  * kmem_cache_shrink - Shrink a cache.
552  * @cachep: The cache to shrink.
553  *
554  * Releases as many slabs as possible for a cache.
555  * To help debugging, a zero exit status indicates all slabs were released.
556  */
557 int kmem_cache_shrink(struct kmem_cache *cachep)
558 {
559 	int ret;
560 
561 	get_online_cpus();
562 	get_online_mems();
563 	ret = __kmem_cache_shrink(cachep);
564 	put_online_mems();
565 	put_online_cpus();
566 	return ret;
567 }
568 EXPORT_SYMBOL(kmem_cache_shrink);
569 
570 int slab_is_available(void)
571 {
572 	return slab_state >= UP;
573 }
574 
575 #ifndef CONFIG_SLOB
576 /* Create a cache during boot when no slab services are available yet */
577 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
578 		unsigned long flags)
579 {
580 	int err;
581 
582 	s->name = name;
583 	s->size = s->object_size = size;
584 	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
585 	err = __kmem_cache_create(s, flags);
586 
587 	if (err)
588 		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
589 					name, size, err);
590 
591 	s->refcount = -1;	/* Exempt from merging for now */
592 }
593 
594 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
595 				unsigned long flags)
596 {
597 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
598 
599 	if (!s)
600 		panic("Out of memory when creating slab %s\n", name);
601 
602 	create_boot_cache(s, name, size, flags);
603 	list_add(&s->list, &slab_caches);
604 	s->refcount = 1;
605 	return s;
606 }
607 
608 struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
609 EXPORT_SYMBOL(kmalloc_caches);
610 
611 #ifdef CONFIG_ZONE_DMA
612 struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
613 EXPORT_SYMBOL(kmalloc_dma_caches);
614 #endif
615 
616 /*
617  * Conversion table for small slab sizes / 8 to the index in the
618  * kmalloc array. This is necessary for slabs < 192 since we have
619  * non-power-of-two cache sizes there. The size of larger slabs can be
620  * determined using fls.
621  */
622 static s8 size_index[24] = {
623 	3,	/* 8 */
624 	4,	/* 16 */
625 	5,	/* 24 */
626 	5,	/* 32 */
627 	6,	/* 40 */
628 	6,	/* 48 */
629 	6,	/* 56 */
630 	6,	/* 64 */
631 	1,	/* 72 */
632 	1,	/* 80 */
633 	1,	/* 88 */
634 	1,	/* 96 */
635 	7,	/* 104 */
636 	7,	/* 112 */
637 	7,	/* 120 */
638 	7,	/* 128 */
639 	2,	/* 136 */
640 	2,	/* 144 */
641 	2,	/* 152 */
642 	2,	/* 160 */
643 	2,	/* 168 */
644 	2,	/* 176 */
645 	2,	/* 184 */
646 	2	/* 192 */
647 };
648 
649 static inline int size_index_elem(size_t bytes)
650 {
651 	return (bytes - 1) / 8;
652 }
653 
654 /*
655  * Find the kmem_cache structure that serves a given size of
656  * allocation
657  */
658 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
659 {
660 	int index;
661 
662 	if (unlikely(size > KMALLOC_MAX_SIZE)) {
663 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
664 		return NULL;
665 	}
666 
667 	if (size <= 192) {
668 		if (!size)
669 			return ZERO_SIZE_PTR;
670 
671 		index = size_index[size_index_elem(size)];
672 	} else
673 		index = fls(size - 1);
674 
675 #ifdef CONFIG_ZONE_DMA
676 	if (unlikely((flags & GFP_DMA)))
677 		return kmalloc_dma_caches[index];
678 
679 #endif
680 	return kmalloc_caches[index];
681 }
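/*
 * Worked example (assuming the default 8-byte minimum alignment, so
 * size_index is unpatched): a 100-byte request takes the size <= 192 path,
 * size_index_elem(100) = (100 - 1) / 8 = 12 and size_index[12] = 7, so
 * kmalloc_caches[7] (the 128-byte cache) is returned; a 300-byte request
 * uses fls(300 - 1) = 9, i.e. the 512-byte cache.
 */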
682 
683 /*
684  * Create the kmalloc array. Some of the regular kmalloc arrays
685  * may already have been created because they were needed to
686  * enable allocations for slab creation.
687  */
688 void __init create_kmalloc_caches(unsigned long flags)
689 {
690 	int i;
691 
692 	/*
693 	 * Patch up the size_index table if we have strange large alignment
694 	 * requirements for the kmalloc array. This seems to be the case only
695 	 * on MIPS. The standard arches will not generate any code here.
696 	 *
697 	 * Largest permitted alignment is 256 bytes due to the way we
698 	 * handle the index determination for the smaller caches.
699 	 *
700 	 * Make sure that nothing crazy happens if someone starts tinkering
701 	 * around with ARCH_KMALLOC_MINALIGN
702 	 */
703 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
704 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
705 
706 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
707 		int elem = size_index_elem(i);
708 
709 		if (elem >= ARRAY_SIZE(size_index))
710 			break;
711 		size_index[elem] = KMALLOC_SHIFT_LOW;
712 	}
713 
714 	if (KMALLOC_MIN_SIZE >= 64) {
715 		/*
716 		 * The 96 byte sized cache is not used if the alignment
717 		 * is 64 bytes.
718 		 */
719 		for (i = 64 + 8; i <= 96; i += 8)
720 			size_index[size_index_elem(i)] = 7;
721 
722 	}
723 
724 	if (KMALLOC_MIN_SIZE >= 128) {
725 		/*
726 		 * The 192 byte sized cache is not used if the alignment
727 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
728 		 * instead.
729 		 */
730 		for (i = 128 + 8; i <= 192; i += 8)
731 			size_index[size_index_elem(i)] = 8;
732 	}
733 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
734 		if (!kmalloc_caches[i]) {
735 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
736 							1 << i, flags);
737 		}
738 
739 		/*
740 		 * Caches that are not of a power-of-two size. These have
741 		 * to be created immediately after the earlier power-of-two
742 		 * caches.
743 		 */
744 		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
745 			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
746 
747 		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
748 			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
749 	}
750 
751 	/* Kmalloc array is now usable */
752 	slab_state = UP;
753 
754 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
755 		struct kmem_cache *s = kmalloc_caches[i];
756 		char *n;
757 
758 		if (s) {
759 			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
760 
761 			BUG_ON(!n);
762 			s->name = n;
763 		}
764 	}
765 
766 #ifdef CONFIG_ZONE_DMA
767 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
768 		struct kmem_cache *s = kmalloc_caches[i];
769 
770 		if (s) {
771 			int size = kmalloc_size(i);
772 			char *n = kasprintf(GFP_NOWAIT,
773 				 "dma-kmalloc-%d", size);
774 
775 			BUG_ON(!n);
776 			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
777 				size, SLAB_CACHE_DMA | flags);
778 		}
779 	}
780 #endif
781 }
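
/*
 * Illustrative summary (the exact set depends on the allocator and on
 * KMALLOC_MIN_SIZE): after the loops above run at boot, the kmalloc array
 * holds a cache for each power of two from 1 << KMALLOC_SHIFT_LOW up to
 * 1 << KMALLOC_SHIFT_HIGH, plus the 96- and 192-byte sizes where the minimum
 * alignment allows them, all named "kmalloc-<size>"; with CONFIG_ZONE_DMA a
 * matching "dma-kmalloc-<size>" cache is created for each of them.
 */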
782 #endif /* !CONFIG_SLOB */
783 
784 /*
785  * To avoid unnecessary overhead, we pass through large allocation requests
786  * directly to the page allocator. We use __GFP_COMP, because we will need to
787  * know the allocation order to free the pages properly in kfree.
788  */
789 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
790 {
791 	void *ret;
792 	struct page *page;
793 
794 	flags |= __GFP_COMP;
795 	page = alloc_kmem_pages(flags, order);
796 	ret = page ? page_address(page) : NULL;
797 	kmemleak_alloc(ret, size, 1, flags);
798 	return ret;
799 }
800 EXPORT_SYMBOL(kmalloc_order);
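/*
 * Worked example (assuming 4 KiB pages and the SLUB allocator): a kmalloc()
 * of 70000 bytes is larger than the largest kmalloc cache and ends up here
 * with order = get_order(70000) = 5, i.e. a 32-page (131072-byte) compound
 * allocation; because of __GFP_COMP, kfree() can later read that order back
 * from the compound page head and return the pages to the page allocator.
 */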
801 
802 #ifdef CONFIG_TRACING
803 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
804 {
805 	void *ret = kmalloc_order(size, flags, order);
806 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
807 	return ret;
808 }
809 EXPORT_SYMBOL(kmalloc_order_trace);
810 #endif
811 
812 #ifdef CONFIG_SLABINFO
813 
814 #ifdef CONFIG_SLAB
815 #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
816 #else
817 #define SLABINFO_RIGHTS S_IRUSR
818 #endif
819 
820 void print_slabinfo_header(struct seq_file *m)
821 {
822 	/*
823 	 * Output format version, so at least we can change it
824 	 * without _too_ many complaints.
825 	 */
826 #ifdef CONFIG_DEBUG_SLAB
827 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
828 #else
829 	seq_puts(m, "slabinfo - version: 2.1\n");
830 #endif
831 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
832 		 "<objperslab> <pagesperslab>");
833 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
834 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
835 #ifdef CONFIG_DEBUG_SLAB
836 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
837 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
838 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
839 #endif
840 	seq_putc(m, '\n');
841 }
842 
843 static void *s_start(struct seq_file *m, loff_t *pos)
844 {
845 	loff_t n = *pos;
846 
847 	mutex_lock(&slab_mutex);
848 	if (!n)
849 		print_slabinfo_header(m);
850 
851 	return seq_list_start(&slab_caches, *pos);
852 }
853 
854 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
855 {
856 	return seq_list_next(p, &slab_caches, pos);
857 }
858 
859 void slab_stop(struct seq_file *m, void *p)
860 {
861 	mutex_unlock(&slab_mutex);
862 }
863 
864 static void
865 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
866 {
867 	struct kmem_cache *c;
868 	struct slabinfo sinfo;
869 	int i;
870 
871 	if (!is_root_cache(s))
872 		return;
873 
874 	for_each_memcg_cache_index(i) {
875 		c = cache_from_memcg_idx(s, i);
876 		if (!c)
877 			continue;
878 
879 		memset(&sinfo, 0, sizeof(sinfo));
880 		get_slabinfo(c, &sinfo);
881 
882 		info->active_slabs += sinfo.active_slabs;
883 		info->num_slabs += sinfo.num_slabs;
884 		info->shared_avail += sinfo.shared_avail;
885 		info->active_objs += sinfo.active_objs;
886 		info->num_objs += sinfo.num_objs;
887 	}
888 }
889 
890 int cache_show(struct kmem_cache *s, struct seq_file *m)
891 {
892 	struct slabinfo sinfo;
893 
894 	memset(&sinfo, 0, sizeof(sinfo));
895 	get_slabinfo(s, &sinfo);
896 
897 	memcg_accumulate_slabinfo(s, &sinfo);
898 
899 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
900 		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
901 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
902 
903 	seq_printf(m, " : tunables %4u %4u %4u",
904 		   sinfo.limit, sinfo.batchcount, sinfo.shared);
905 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
906 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
907 	slabinfo_show_stats(m, s);
908 	seq_putc(m, '\n');
909 	return 0;
910 }
911 
912 static int s_show(struct seq_file *m, void *p)
913 {
914 	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
915 
916 	if (!is_root_cache(s))
917 		return 0;
918 	return cache_show(s, m);
919 }
920 
921 /*
922  * slabinfo_op - iterator that generates /proc/slabinfo
923  *
924  * Output layout:
925  * cache-name
926  * num-active-objs
927  * total-objs
928  * object size
929  * num-active-slabs
930  * total-slabs
931  * num-pages-per-slab
932  * + further values on SMP and with statistics enabled
933  */
934 static const struct seq_operations slabinfo_op = {
935 	.start = s_start,
936 	.next = slab_next,
937 	.stop = slab_stop,
938 	.show = s_show,
939 };
940 
941 static int slabinfo_open(struct inode *inode, struct file *file)
942 {
943 	return seq_open(file, &slabinfo_op);
944 }
945 
946 static const struct file_operations proc_slabinfo_operations = {
947 	.open		= slabinfo_open,
948 	.read		= seq_read,
949 	.write          = slabinfo_write,
950 	.llseek		= seq_lseek,
951 	.release	= seq_release,
952 };
953 
954 static int __init slab_proc_init(void)
955 {
956 	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
957 						&proc_slabinfo_operations);
958 	return 0;
959 }
960 module_init(slab_proc_init);
961 #endif /* CONFIG_SLABINFO */
962 
963 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
964 					   gfp_t flags)
965 {
966 	void *ret;
967 	size_t ks = 0;
968 
969 	if (p)
970 		ks = ksize(p);
971 
972 	if (ks >= new_size)
973 		return (void *)p;
974 
975 	ret = kmalloc_track_caller(new_size, flags);
976 	if (ret && p)
977 		memcpy(ret, p, ks);
978 
979 	return ret;
980 }
981 
982 /**
983  * __krealloc - like krealloc() but don't free @p.
984  * @p: object to reallocate memory for.
985  * @new_size: how many bytes of memory are required.
986  * @flags: the type of memory to allocate.
987  *
988  * This function is like krealloc() except it never frees the originally
989  * allocated buffer. Use this if you don't want to free the buffer immediately,
990  * for example, when the old buffer is still being used by RCU readers.
991  */
992 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
993 {
994 	if (unlikely(!new_size))
995 		return ZERO_SIZE_PTR;
996 
997 	return __do_krealloc(p, new_size, flags);
998 
999 }
1000 EXPORT_SYMBOL(__krealloc);
1001 
1002 /**
1003  * krealloc - reallocate memory. The contents will remain unchanged.
1004  * @p: object to reallocate memory for.
1005  * @new_size: how many bytes of memory are required.
1006  * @flags: the type of memory to allocate.
1007  *
1008  * The contents of the object pointed to are preserved up to the
1009  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
1010  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
1011  * %NULL pointer, the object pointed to is freed.
1012  */
1013 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1014 {
1015 	void *ret;
1016 
1017 	if (unlikely(!new_size)) {
1018 		kfree(p);
1019 		return ZERO_SIZE_PTR;
1020 	}
1021 
1022 	ret = __do_krealloc(p, new_size, flags);
1023 	if (ret && p != ret)
1024 		kfree(p);
1025 
1026 	return ret;
1027 }
1028 EXPORT_SYMBOL(krealloc);
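/*
 * Usage sketch ("buf" and "count" are hypothetical caller variables):
 *
 *	int *bigger;
 *
 *	bigger = krealloc(buf, 2 * count * sizeof(*buf), GFP_KERNEL);
 *	if (!bigger)
 *		return -ENOMEM;
 *	buf = bigger;
 *
 * On failure krealloc() returns NULL and leaves the old buffer intact, so the
 * result must be checked through a temporary as above; assigning it straight
 * to buf would lose the only reference to the old allocation.
 */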
1029 
1030 /**
1031  * kzfree - like kfree but zero memory
1032  * @p: object to free memory of
1033  *
1034  * The memory pointed to by @p is zeroed before it is freed.
1035  * If @p is %NULL, kzfree() does nothing.
1036  *
1037  * Note: this function zeroes the whole allocated buffer which can be a good
1038  * deal bigger than the requested buffer size passed to kmalloc(). So be
1039  * careful when using this function in performance sensitive code.
1040  */
1041 void kzfree(const void *p)
1042 {
1043 	size_t ks;
1044 	void *mem = (void *)p;
1045 
1046 	if (unlikely(ZERO_OR_NULL_PTR(mem)))
1047 		return;
1048 	ks = ksize(mem);
1049 	memset(mem, 0, ks);
1050 	kfree(mem);
1051 }
1052 EXPORT_SYMBOL(kzfree);
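/*
 * Usage sketch ("key" and "key_len" are hypothetical): kzfree() is meant for
 * buffers that held sensitive data, e.g.
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 *
 * As noted above, the whole ksize() region is cleared, which may be larger
 * than the key_len originally requested.
 */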
1053 
1054 /* Tracepoints definitions. */
1055 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1056 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1057 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1058 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1059 EXPORT_TRACEPOINT_SYMBOL(kfree);
1060 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1061