xref: /openbmc/linux/mm/slab_common.c (revision 2596e07a)
1 /*
2  * Slab allocator functions that are independent of the allocator strategy
3  *
4  * (C) 2012 Christoph Lameter <cl@linux.com>
5  */
6 #include <linux/slab.h>
7 
8 #include <linux/mm.h>
9 #include <linux/poison.h>
10 #include <linux/interrupt.h>
11 #include <linux/memory.h>
12 #include <linux/compiler.h>
13 #include <linux/module.h>
14 #include <linux/cpu.h>
15 #include <linux/uaccess.h>
16 #include <linux/seq_file.h>
17 #include <linux/proc_fs.h>
18 #include <asm/cacheflush.h>
19 #include <asm/tlbflush.h>
20 #include <asm/page.h>
21 #include <linux/memcontrol.h>
22 
23 #define CREATE_TRACE_POINTS
24 #include <trace/events/kmem.h>
25 
26 #include "slab.h"
27 
28 enum slab_state slab_state;
29 LIST_HEAD(slab_caches);
30 DEFINE_MUTEX(slab_mutex);
31 struct kmem_cache *kmem_cache;
32 
33 /*
34  * Set of flags that will prevent slab merging
35  */
36 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
37 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 		SLAB_FAILSLAB)
39 
40 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
41 			 SLAB_NOTRACK | SLAB_ACCOUNT)
42 
43 /*
44  * Merge control. If this is set then no merging of slab caches will occur.
45  * (Could be removed. This was introduced to pacify the merge skeptics.)
46  */
47 static int slab_nomerge;
48 
49 static int __init setup_slab_nomerge(char *str)
50 {
51 	slab_nomerge = 1;
52 	return 1;
53 }
54 
55 #ifdef CONFIG_SLUB
56 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
57 #endif
58 
59 __setup("slab_nomerge", setup_slab_nomerge);
60 
61 /*
62  * Determine the size of a slab object
63  */
64 unsigned int kmem_cache_size(struct kmem_cache *s)
65 {
66 	return s->object_size;
67 }
68 EXPORT_SYMBOL(kmem_cache_size);
69 
70 #ifdef CONFIG_DEBUG_VM
71 static int kmem_cache_sanity_check(const char *name, size_t size)
72 {
73 	struct kmem_cache *s = NULL;
74 
75 	if (!name || in_interrupt() || size < sizeof(void *) ||
76 		size > KMALLOC_MAX_SIZE) {
77 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
78 		return -EINVAL;
79 	}
80 
81 	list_for_each_entry(s, &slab_caches, list) {
82 		char tmp;
83 		int res;
84 
85 		/*
86 		 * This happens when the module gets unloaded and doesn't
87 		 * destroy its slab cache and no-one else reuses the vmalloc
88 		 * area of the module.  Print a warning.
89 		 */
90 		res = probe_kernel_address(s->name, tmp);
91 		if (res) {
92 			pr_err("Slab cache with size %d has lost its name\n",
93 			       s->object_size);
94 			continue;
95 		}
96 	}
97 
98 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
99 	return 0;
100 }
101 #else
102 static inline int kmem_cache_sanity_check(const char *name, size_t size)
103 {
104 	return 0;
105 }
106 #endif
107 
108 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
109 {
110 	size_t i;
111 
112 	for (i = 0; i < nr; i++)
113 		kmem_cache_free(s, p[i]);
114 }
115 
116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
117 								void **p)
118 {
119 	size_t i;
120 
121 	for (i = 0; i < nr; i++) {
122 		void *x = p[i] = kmem_cache_alloc(s, flags);
123 		if (!x) {
124 			__kmem_cache_free_bulk(s, i, p);
125 			return 0;
126 		}
127 	}
128 	return i;
129 }
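/*
 * Illustrative sketch (not part of the original file): how a caller might use
 * the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk() wrappers that the
 * generic helpers above back.  The cache and function names are hypothetical;
 * the return convention mirrors __kmem_cache_alloc_bulk() above: the number of
 * objects allocated, or 0 if the whole request failed.
 */
static int example_bulk_round_trip(struct kmem_cache *cachep)
{
	void *objs[16];

	if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;		/* nothing was allocated */

	/* ... use all 16 objects ... */

	kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
	return 0;
}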
130 
131 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
132 void slab_init_memcg_params(struct kmem_cache *s)
133 {
134 	s->memcg_params.is_root_cache = true;
135 	INIT_LIST_HEAD(&s->memcg_params.list);
136 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
137 }
138 
139 static int init_memcg_params(struct kmem_cache *s,
140 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
141 {
142 	struct memcg_cache_array *arr;
143 
144 	if (memcg) {
145 		s->memcg_params.is_root_cache = false;
146 		s->memcg_params.memcg = memcg;
147 		s->memcg_params.root_cache = root_cache;
148 		return 0;
149 	}
150 
151 	slab_init_memcg_params(s);
152 
153 	if (!memcg_nr_cache_ids)
154 		return 0;
155 
156 	arr = kzalloc(sizeof(struct memcg_cache_array) +
157 		      memcg_nr_cache_ids * sizeof(void *),
158 		      GFP_KERNEL);
159 	if (!arr)
160 		return -ENOMEM;
161 
162 	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
163 	return 0;
164 }
165 
166 static void destroy_memcg_params(struct kmem_cache *s)
167 {
168 	if (is_root_cache(s))
169 		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
170 }
171 
172 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
173 {
174 	struct memcg_cache_array *old, *new;
175 
176 	if (!is_root_cache(s))
177 		return 0;
178 
179 	new = kzalloc(sizeof(struct memcg_cache_array) +
180 		      new_array_size * sizeof(void *), GFP_KERNEL);
181 	if (!new)
182 		return -ENOMEM;
183 
184 	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
185 					lockdep_is_held(&slab_mutex));
186 	if (old)
187 		memcpy(new->entries, old->entries,
188 		       memcg_nr_cache_ids * sizeof(void *));
189 
190 	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
191 	if (old)
192 		kfree_rcu(old, rcu);
193 	return 0;
194 }
195 
196 int memcg_update_all_caches(int num_memcgs)
197 {
198 	struct kmem_cache *s;
199 	int ret = 0;
200 
201 	mutex_lock(&slab_mutex);
202 	list_for_each_entry(s, &slab_caches, list) {
203 		ret = update_memcg_params(s, num_memcgs);
204 		/*
205 		 * Instead of freeing the memory and unwinding, we just leave
206 		 * the caches updated up to this point in their new state.
207 		 */
208 		if (ret)
209 			break;
210 	}
211 	mutex_unlock(&slab_mutex);
212 	return ret;
213 }
214 #else
215 static inline int init_memcg_params(struct kmem_cache *s,
216 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
217 {
218 	return 0;
219 }
220 
221 static inline void destroy_memcg_params(struct kmem_cache *s)
222 {
223 }
224 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
225 
226 /*
227  * Find a mergeable slab cache
228  */
229 int slab_unmergeable(struct kmem_cache *s)
230 {
231 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
232 		return 1;
233 
234 	if (!is_root_cache(s))
235 		return 1;
236 
237 	if (s->ctor)
238 		return 1;
239 
240 	/*
241 	 * We may have set a slab to be unmergeable during bootstrap.
242 	 */
243 	if (s->refcount < 0)
244 		return 1;
245 
246 	return 0;
247 }
248 
249 struct kmem_cache *find_mergeable(size_t size, size_t align,
250 		unsigned long flags, const char *name, void (*ctor)(void *))
251 {
252 	struct kmem_cache *s;
253 
254 	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
255 		return NULL;
256 
257 	if (ctor)
258 		return NULL;
259 
260 	size = ALIGN(size, sizeof(void *));
261 	align = calculate_alignment(flags, align, size);
262 	size = ALIGN(size, align);
263 	flags = kmem_cache_flags(size, flags, name, NULL);
264 
265 	list_for_each_entry_reverse(s, &slab_caches, list) {
266 		if (slab_unmergeable(s))
267 			continue;
268 
269 		if (size > s->size)
270 			continue;
271 
272 		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
273 			continue;
274 		/*
275 		 * Check if alignment is compatible.
276 		 * Courtesy of Adrian Drzewiecki
277 		 */
278 		if ((s->size & ~(align - 1)) != s->size)
279 			continue;
280 
281 		if (s->size - size >= sizeof(void *))
282 			continue;
283 
284 		if (IS_ENABLED(CONFIG_SLAB) && align &&
285 			(align > s->align || s->align % align))
286 			continue;
287 
288 		return s;
289 	}
290 	return NULL;
291 }
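/*
 * Illustrative example (assuming an 8 byte minimum alignment and no debug
 * flags): a request for a 60 byte cache with no constructor rounds up to a
 * 64 byte slot, so it can be merged into an existing 64 byte cache such as
 * kmalloc-64, provided the SLAB_MERGE_SAME bits of both caches match.
 */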
292 
293 /*
294  * Figure out what the alignment of the objects will be given a set of
295  * flags, a user specified alignment and the size of the objects.
296  */
297 unsigned long calculate_alignment(unsigned long flags,
298 		unsigned long align, unsigned long size)
299 {
300 	/*
301 	 * If the user wants hardware cache aligned objects then follow that
302 	 * suggestion if the object is sufficiently large.
303 	 *
304 	 * The hardware cache alignment cannot override the specified
305  * alignment, though. If the specified alignment is greater, use it.
306 	 */
307 	if (flags & SLAB_HWCACHE_ALIGN) {
308 		unsigned long ralign = cache_line_size();
309 		while (size <= ralign / 2)
310 			ralign /= 2;
311 		align = max(align, ralign);
312 	}
313 
314 	if (align < ARCH_SLAB_MINALIGN)
315 		align = ARCH_SLAB_MINALIGN;
316 
317 	return ALIGN(align, sizeof(void *));
318 }
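/*
 * Worked example (illustrative): with SLAB_HWCACHE_ALIGN, a 64 byte cache
 * line and a 24 byte object, ralign is halved once (24 <= 64 / 2) to 32 and
 * the loop then stops (24 > 32 / 2), so objects get 32 byte alignment rather
 * than a full cache line; a larger caller-supplied alignment would still take
 * precedence via max().
 */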
319 
320 static struct kmem_cache *create_cache(const char *name,
321 		size_t object_size, size_t size, size_t align,
322 		unsigned long flags, void (*ctor)(void *),
323 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
324 {
325 	struct kmem_cache *s;
326 	int err;
327 
328 	err = -ENOMEM;
329 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
330 	if (!s)
331 		goto out;
332 
333 	s->name = name;
334 	s->object_size = object_size;
335 	s->size = size;
336 	s->align = align;
337 	s->ctor = ctor;
338 
339 	err = init_memcg_params(s, memcg, root_cache);
340 	if (err)
341 		goto out_free_cache;
342 
343 	err = __kmem_cache_create(s, flags);
344 	if (err)
345 		goto out_free_cache;
346 
347 	s->refcount = 1;
348 	list_add(&s->list, &slab_caches);
349 out:
350 	if (err)
351 		return ERR_PTR(err);
352 	return s;
353 
354 out_free_cache:
355 	destroy_memcg_params(s);
356 	kmem_cache_free(kmem_cache, s);
357 	goto out;
358 }
359 
360 /*
361  * kmem_cache_create - Create a cache.
362  * @name: A string which is used in /proc/slabinfo to identify this cache.
363  * @size: The size of objects to be created in this cache.
364  * @align: The required alignment for the objects.
365  * @flags: SLAB flags
366  * @ctor: A constructor for the objects.
367  *
368  * Returns a ptr to the cache on success, NULL on failure.
369  * Cannot be called within an interrupt, but can be interrupted.
370  * The @ctor is run when new pages are allocated by the cache.
371  *
372  * The flags are
373  *
374  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
375  * to catch references to uninitialised memory.
376  *
377  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
378  * for buffer overruns.
379  *
380  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
381  * cacheline.  This can be beneficial if you're counting cycles as closely
382  * as davem.
383  */
384 struct kmem_cache *
385 kmem_cache_create(const char *name, size_t size, size_t align,
386 		  unsigned long flags, void (*ctor)(void *))
387 {
388 	struct kmem_cache *s = NULL;
389 	const char *cache_name;
390 	int err;
391 
392 	get_online_cpus();
393 	get_online_mems();
394 	memcg_get_cache_ids();
395 
396 	mutex_lock(&slab_mutex);
397 
398 	err = kmem_cache_sanity_check(name, size);
399 	if (err) {
400 		goto out_unlock;
401 	}
402 
403 	/*
404 	 * Some allocators will constrain the set of valid flags to a subset
405 	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
406 	 * case, and we'll just provide them with a sanitized version of the
407 	 * passed flags.
408 	 */
409 	flags &= CACHE_CREATE_MASK;
410 
411 	s = __kmem_cache_alias(name, size, align, flags, ctor);
412 	if (s)
413 		goto out_unlock;
414 
415 	cache_name = kstrdup_const(name, GFP_KERNEL);
416 	if (!cache_name) {
417 		err = -ENOMEM;
418 		goto out_unlock;
419 	}
420 
421 	s = create_cache(cache_name, size, size,
422 			 calculate_alignment(flags, align, size),
423 			 flags, ctor, NULL, NULL);
424 	if (IS_ERR(s)) {
425 		err = PTR_ERR(s);
426 		kfree_const(cache_name);
427 	}
428 
429 out_unlock:
430 	mutex_unlock(&slab_mutex);
431 
432 	memcg_put_cache_ids();
433 	put_online_mems();
434 	put_online_cpus();
435 
436 	if (err) {
437 		if (flags & SLAB_PANIC)
438 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
439 				name, err);
440 		else {
441 			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
442 				name, err);
443 			dump_stack();
444 		}
445 		return NULL;
446 	}
447 	return s;
448 }
449 EXPORT_SYMBOL(kmem_cache_create);
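/*
 * Minimal usage sketch (illustrative, not part of the original file): a
 * dedicated cache for a hypothetical "struct foo".  The cache name, the
 * structure and the function names are made up for the example.
 */
struct foo {
	unsigned long cookie;
	struct list_head link;
};

static struct kmem_cache *foo_cachep;

static int example_foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void example_foo_cache_exit(void)
{
	struct foo *obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (obj)
		kmem_cache_free(foo_cachep, obj);
	/* Every object must be freed before the cache is destroyed. */
	kmem_cache_destroy(foo_cachep);
}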
450 
451 static int shutdown_cache(struct kmem_cache *s,
452 		struct list_head *release, bool *need_rcu_barrier)
453 {
454 	if (__kmem_cache_shutdown(s) != 0)
455 		return -EBUSY;
456 
457 	if (s->flags & SLAB_DESTROY_BY_RCU)
458 		*need_rcu_barrier = true;
459 
460 	list_move(&s->list, release);
461 	return 0;
462 }
463 
464 static void release_caches(struct list_head *release, bool need_rcu_barrier)
465 {
466 	struct kmem_cache *s, *s2;
467 
468 	if (need_rcu_barrier)
469 		rcu_barrier();
470 
471 	list_for_each_entry_safe(s, s2, release, list) {
472 #ifdef SLAB_SUPPORTS_SYSFS
473 		sysfs_slab_remove(s);
474 #else
475 		slab_kmem_cache_release(s);
476 #endif
477 	}
478 }
479 
480 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
481 /*
482  * memcg_create_kmem_cache - Create a cache for a memory cgroup.
483  * @memcg: The memory cgroup the new cache is for.
484  * @root_cache: The parent of the new cache.
485  *
486  * This function attempts to create a kmem cache that will serve allocation
487  * requests going from @memcg to @root_cache. The new cache inherits properties
488  * from its parent.
489  */
490 void memcg_create_kmem_cache(struct mem_cgroup *memcg,
491 			     struct kmem_cache *root_cache)
492 {
493 	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
494 	struct cgroup_subsys_state *css = &memcg->css;
495 	struct memcg_cache_array *arr;
496 	struct kmem_cache *s = NULL;
497 	char *cache_name;
498 	int idx;
499 
500 	get_online_cpus();
501 	get_online_mems();
502 
503 	mutex_lock(&slab_mutex);
504 
505 	/*
506 	 * The memory cgroup could have been offlined while the cache
507 	 * creation work was pending.
508 	 */
509 	if (!memcg_kmem_online(memcg))
510 		goto out_unlock;
511 
512 	idx = memcg_cache_id(memcg);
513 	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
514 					lockdep_is_held(&slab_mutex));
515 
516 	/*
517 	 * Since per-memcg caches are created asynchronously on first
518 	 * allocation (see memcg_kmem_get_cache()), several threads can try to
519 	 * create the same cache, but only one of them may succeed.
520 	 */
521 	if (arr->entries[idx])
522 		goto out_unlock;
523 
524 	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
525 	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
526 			       css->id, memcg_name_buf);
527 	if (!cache_name)
528 		goto out_unlock;
529 
530 	s = create_cache(cache_name, root_cache->object_size,
531 			 root_cache->size, root_cache->align,
532 			 root_cache->flags, root_cache->ctor,
533 			 memcg, root_cache);
534 	/*
535 	 * If we could not create a memcg cache, do not complain, because
536 	 * that's not critical at all as we can always proceed with the root
537 	 * cache.
538 	 */
539 	if (IS_ERR(s)) {
540 		kfree(cache_name);
541 		goto out_unlock;
542 	}
543 
544 	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
545 
546 	/*
547 	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
548 	 * barrier here to ensure nobody will see the kmem_cache partially
549 	 * initialized.
550 	 */
551 	smp_wmb();
552 	arr->entries[idx] = s;
553 
554 out_unlock:
555 	mutex_unlock(&slab_mutex);
556 
557 	put_online_mems();
558 	put_online_cpus();
559 }
560 
561 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
562 {
563 	int idx;
564 	struct memcg_cache_array *arr;
565 	struct kmem_cache *s, *c;
566 
567 	idx = memcg_cache_id(memcg);
568 
569 	get_online_cpus();
570 	get_online_mems();
571 
572 	mutex_lock(&slab_mutex);
573 	list_for_each_entry(s, &slab_caches, list) {
574 		if (!is_root_cache(s))
575 			continue;
576 
577 		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
578 						lockdep_is_held(&slab_mutex));
579 		c = arr->entries[idx];
580 		if (!c)
581 			continue;
582 
583 		__kmem_cache_shrink(c, true);
584 		arr->entries[idx] = NULL;
585 	}
586 	mutex_unlock(&slab_mutex);
587 
588 	put_online_mems();
589 	put_online_cpus();
590 }
591 
592 static int __shutdown_memcg_cache(struct kmem_cache *s,
593 		struct list_head *release, bool *need_rcu_barrier)
594 {
595 	BUG_ON(is_root_cache(s));
596 
597 	if (shutdown_cache(s, release, need_rcu_barrier))
598 		return -EBUSY;
599 
600 	list_del(&s->memcg_params.list);
601 	return 0;
602 }
603 
604 void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
605 {
606 	LIST_HEAD(release);
607 	bool need_rcu_barrier = false;
608 	struct kmem_cache *s, *s2;
609 
610 	get_online_cpus();
611 	get_online_mems();
612 
613 	mutex_lock(&slab_mutex);
614 	list_for_each_entry_safe(s, s2, &slab_caches, list) {
615 		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
616 			continue;
617 		/*
618 		 * The cgroup is about to be freed and therefore has no charges
619 		 * left. Hence, all its caches must be empty by now.
620 		 */
621 		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
622 	}
623 	mutex_unlock(&slab_mutex);
624 
625 	put_online_mems();
626 	put_online_cpus();
627 
628 	release_caches(&release, need_rcu_barrier);
629 }
630 
631 static int shutdown_memcg_caches(struct kmem_cache *s,
632 		struct list_head *release, bool *need_rcu_barrier)
633 {
634 	struct memcg_cache_array *arr;
635 	struct kmem_cache *c, *c2;
636 	LIST_HEAD(busy);
637 	int i;
638 
639 	BUG_ON(!is_root_cache(s));
640 
641 	/*
642 	 * First, shutdown active caches, i.e. caches that belong to online
643 	 * memory cgroups.
644 	 */
645 	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
646 					lockdep_is_held(&slab_mutex));
647 	for_each_memcg_cache_index(i) {
648 		c = arr->entries[i];
649 		if (!c)
650 			continue;
651 		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
652 			/*
653 			 * The cache still has objects. Move it to a temporary
654 			 * list so as not to try to destroy it for a second
655 			 * time while iterating over inactive caches below.
656 			 */
657 			list_move(&c->memcg_params.list, &busy);
658 		else
659 			/*
660 			 * The cache is empty and will be destroyed soon. Clear
661 			 * the pointer to it in the memcg_caches array so that
662 			 * it will never be accessed even if the root cache
663 			 * stays alive.
664 			 */
665 			arr->entries[i] = NULL;
666 	}
667 
668 	/*
669 	 * Second, shutdown all caches left from memory cgroups that are now
670 	 * offline.
671 	 */
672 	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
673 				 memcg_params.list)
674 		__shutdown_memcg_cache(c, release, need_rcu_barrier);
675 
676 	list_splice(&busy, &s->memcg_params.list);
677 
678 	/*
679 	 * A cache being destroyed must be empty. In particular, this means
680 	 * that all per memcg caches attached to it must be empty too.
681 	 */
682 	if (!list_empty(&s->memcg_params.list))
683 		return -EBUSY;
684 	return 0;
685 }
686 #else
687 static inline int shutdown_memcg_caches(struct kmem_cache *s,
688 		struct list_head *release, bool *need_rcu_barrier)
689 {
690 	return 0;
691 }
692 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
693 
694 void slab_kmem_cache_release(struct kmem_cache *s)
695 {
696 	__kmem_cache_release(s);
697 	destroy_memcg_params(s);
698 	kfree_const(s->name);
699 	kmem_cache_free(kmem_cache, s);
700 }
701 
702 void kmem_cache_destroy(struct kmem_cache *s)
703 {
704 	LIST_HEAD(release);
705 	bool need_rcu_barrier = false;
706 	int err;
707 
708 	if (unlikely(!s))
709 		return;
710 
711 	get_online_cpus();
712 	get_online_mems();
713 
714 	mutex_lock(&slab_mutex);
715 
716 	s->refcount--;
717 	if (s->refcount)
718 		goto out_unlock;
719 
720 	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
721 	if (!err)
722 		err = shutdown_cache(s, &release, &need_rcu_barrier);
723 
724 	if (err) {
725 		pr_err("kmem_cache_destroy %s: "
726 		       "Slab cache still has objects\n", s->name);
727 		dump_stack();
728 	}
729 out_unlock:
730 	mutex_unlock(&slab_mutex);
731 
732 	put_online_mems();
733 	put_online_cpus();
734 
735 	release_caches(&release, need_rcu_barrier);
736 }
737 EXPORT_SYMBOL(kmem_cache_destroy);
738 
739 /**
740  * kmem_cache_shrink - Shrink a cache.
741  * @cachep: The cache to shrink.
742  *
743  * Releases as many slabs as possible for a cache.
744  * To help debugging, a zero exit status indicates all slabs were released.
745  */
746 int kmem_cache_shrink(struct kmem_cache *cachep)
747 {
748 	int ret;
749 
750 	get_online_cpus();
751 	get_online_mems();
752 	ret = __kmem_cache_shrink(cachep, false);
753 	put_online_mems();
754 	put_online_cpus();
755 	return ret;
756 }
757 EXPORT_SYMBOL(kmem_cache_shrink);
758 
759 bool slab_is_available(void)
760 {
761 	return slab_state >= UP;
762 }
763 
764 #ifndef CONFIG_SLOB
765 /* Create a cache during boot when no slab services are available yet */
766 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
767 		unsigned long flags)
768 {
769 	int err;
770 
771 	s->name = name;
772 	s->size = s->object_size = size;
773 	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
774 
775 	slab_init_memcg_params(s);
776 
777 	err = __kmem_cache_create(s, flags);
778 
779 	if (err)
780 		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
781 					name, size, err);
782 
783 	s->refcount = -1;	/* Exempt from merging for now */
784 }
785 
786 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
787 				unsigned long flags)
788 {
789 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
790 
791 	if (!s)
792 		panic("Out of memory when creating slab %s\n", name);
793 
794 	create_boot_cache(s, name, size, flags);
795 	list_add(&s->list, &slab_caches);
796 	s->refcount = 1;
797 	return s;
798 }
799 
800 struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
801 EXPORT_SYMBOL(kmalloc_caches);
802 
803 #ifdef CONFIG_ZONE_DMA
804 struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
805 EXPORT_SYMBOL(kmalloc_dma_caches);
806 #endif
807 
808 /*
809  * Conversion table for small slab sizes / 8 to the index in the
810  * kmalloc array. This is necessary for slabs < 192 since we have non power
811  * of two cache sizes there. The size of larger slabs can be determined using
812  * fls.
813  */
814 static s8 size_index[24] = {
815 	3,	/* 8 */
816 	4,	/* 16 */
817 	5,	/* 24 */
818 	5,	/* 32 */
819 	6,	/* 40 */
820 	6,	/* 48 */
821 	6,	/* 56 */
822 	6,	/* 64 */
823 	1,	/* 72 */
824 	1,	/* 80 */
825 	1,	/* 88 */
826 	1,	/* 96 */
827 	7,	/* 104 */
828 	7,	/* 112 */
829 	7,	/* 120 */
830 	7,	/* 128 */
831 	2,	/* 136 */
832 	2,	/* 144 */
833 	2,	/* 152 */
834 	2,	/* 160 */
835 	2,	/* 168 */
836 	2,	/* 176 */
837 	2,	/* 184 */
838 	2	/* 192 */
839 };
840 
841 static inline int size_index_elem(size_t bytes)
842 {
843 	return (bytes - 1) / 8;
844 }
845 
846 /*
847  * Find the kmem_cache structure that serves a given size of
848  * allocation
849  */
850 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
851 {
852 	int index;
853 
854 	if (unlikely(size > KMALLOC_MAX_SIZE)) {
855 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
856 		return NULL;
857 	}
858 
859 	if (size <= 192) {
860 		if (!size)
861 			return ZERO_SIZE_PTR;
862 
863 		index = size_index[size_index_elem(size)];
864 	} else
865 		index = fls(size - 1);
866 
867 #ifdef CONFIG_ZONE_DMA
868 	if (unlikely((flags & GFP_DMA)))
869 		return kmalloc_dma_caches[index];
870 
871 #endif
872 	return kmalloc_caches[index];
873 }
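/*
 * Worked example (illustrative, assuming the default table above): a 40 byte
 * request uses size_index[(40 - 1) / 8] = size_index[4] = 6, i.e. the
 * kmalloc-64 cache, while a 300 byte request uses fls(300 - 1) = 9, i.e. the
 * kmalloc-512 cache.
 */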
874 
875 /*
876  * kmalloc_info[] is used to make the slub_debug=,kmalloc-xx option work at boot time.
877  * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
878  * kmalloc-67108864.
879  */
880 static struct {
881 	const char *name;
882 	unsigned long size;
883 } const kmalloc_info[] __initconst = {
884 	{NULL,                      0},		{"kmalloc-96",             96},
885 	{"kmalloc-192",           192},		{"kmalloc-8",               8},
886 	{"kmalloc-16",             16},		{"kmalloc-32",             32},
887 	{"kmalloc-64",             64},		{"kmalloc-128",           128},
888 	{"kmalloc-256",           256},		{"kmalloc-512",           512},
889 	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
890 	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
891 	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
892 	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
893 	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
894 	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
895 	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
896 	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
897 	{"kmalloc-67108864", 67108864}
898 };
899 
900 /*
901  * Patch up the size_index table if we have strange large alignment
902  * requirements for the kmalloc array. This is only the case for
903  * MIPS, it seems. The standard arches will not generate any code here.
904  *
905  * Largest permitted alignment is 256 bytes due to the way we
906  * handle the index determination for the smaller caches.
907  *
908  * Make sure that nothing crazy happens if someone starts tinkering
909  * around with ARCH_KMALLOC_MINALIGN
910  */
911 void __init setup_kmalloc_cache_index_table(void)
912 {
913 	int i;
914 
915 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
916 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
917 
918 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
919 		int elem = size_index_elem(i);
920 
921 		if (elem >= ARRAY_SIZE(size_index))
922 			break;
923 		size_index[elem] = KMALLOC_SHIFT_LOW;
924 	}
925 
926 	if (KMALLOC_MIN_SIZE >= 64) {
927 		/*
928 		 * The 96 byte sized cache is not used if the alignment
929 		 * is 64 bytes.
930 		 */
931 		for (i = 64 + 8; i <= 96; i += 8)
932 			size_index[size_index_elem(i)] = 7;
933 
934 	}
935 
936 	if (KMALLOC_MIN_SIZE >= 128) {
937 		/*
938 		 * The 192 byte sized cache is not used if the alignment
939 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
940 		 * instead.
941 		 */
942 		for (i = 128 + 8; i <= 192; i += 8)
943 			size_index[size_index_elem(i)] = 8;
944 	}
945 }
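/*
 * Worked example (illustrative): with KMALLOC_MIN_SIZE = 64 (so
 * KMALLOC_SHIFT_LOW = 6), the entries for sizes 8..56 are redirected to
 * index 6 (kmalloc-64), and the 72..96 byte entries move from the unused
 * kmalloc-96 cache (index 1) to index 7 (kmalloc-128).
 */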
946 
947 static void __init new_kmalloc_cache(int idx, unsigned long flags)
948 {
949 	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
950 					kmalloc_info[idx].size, flags);
951 }
952 
953 /*
954  * Create the kmalloc array. Some of the regular kmalloc arrays
955  * may already have been created because they were needed to
956  * enable allocations for slab creation.
957  */
958 void __init create_kmalloc_caches(unsigned long flags)
959 {
960 	int i;
961 
962 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
963 		if (!kmalloc_caches[i])
964 			new_kmalloc_cache(i, flags);
965 
966 		/*
967 		 * Caches that are not of a power-of-two size. These have to
968 		 * be created immediately after the earlier power-of-two
969 		 * caches.
970 		 */
971 		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
972 			new_kmalloc_cache(1, flags);
973 		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
974 			new_kmalloc_cache(2, flags);
975 	}
976 
977 	/* Kmalloc array is now usable */
978 	slab_state = UP;
979 
980 #ifdef CONFIG_ZONE_DMA
981 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
982 		struct kmem_cache *s = kmalloc_caches[i];
983 
984 		if (s) {
985 			int size = kmalloc_size(i);
986 			char *n = kasprintf(GFP_NOWAIT,
987 				 "dma-kmalloc-%d", size);
988 
989 			BUG_ON(!n);
990 			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
991 				size, SLAB_CACHE_DMA | flags);
992 		}
993 	}
994 #endif
995 }
996 #endif /* !CONFIG_SLOB */
997 
998 /*
999  * To avoid unnecessary overhead, we pass through large allocation requests
1000  * directly to the page allocator. We use __GFP_COMP, because we will need to
1001  * know the allocation order to free the pages properly in kfree.
1002  */
1003 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1004 {
1005 	void *ret;
1006 	struct page *page;
1007 
1008 	flags |= __GFP_COMP;
1009 	page = alloc_kmem_pages(flags, order);
1010 	ret = page ? page_address(page) : NULL;
1011 	kmemleak_alloc(ret, size, 1, flags);
1012 	kasan_kmalloc_large(ret, size);
1013 	return ret;
1014 }
1015 EXPORT_SYMBOL(kmalloc_order);
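/*
 * Illustrative note (SLUB case): a request such as kmalloc(64 * 1024,
 * GFP_KERNEL) exceeds KMALLOC_MAX_CACHE_SIZE and ends up here, returning the
 * start of a compound page of the matching order; kfree() later recovers that
 * order from the page itself when freeing.
 */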
1016 
1017 #ifdef CONFIG_TRACING
1018 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1019 {
1020 	void *ret = kmalloc_order(size, flags, order);
1021 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1022 	return ret;
1023 }
1024 EXPORT_SYMBOL(kmalloc_order_trace);
1025 #endif
1026 
1027 #ifdef CONFIG_SLABINFO
1028 
1029 #ifdef CONFIG_SLAB
1030 #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
1031 #else
1032 #define SLABINFO_RIGHTS S_IRUSR
1033 #endif
1034 
1035 static void print_slabinfo_header(struct seq_file *m)
1036 {
1037 	/*
1038 	 * Output format version, so at least we can change it
1039 	 * without _too_ many complaints.
1040 	 */
1041 #ifdef CONFIG_DEBUG_SLAB
1042 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1043 #else
1044 	seq_puts(m, "slabinfo - version: 2.1\n");
1045 #endif
1046 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
1047 		 "<objperslab> <pagesperslab>");
1048 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1049 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1050 #ifdef CONFIG_DEBUG_SLAB
1051 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
1052 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1053 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1054 #endif
1055 	seq_putc(m, '\n');
1056 }
1057 
1058 void *slab_start(struct seq_file *m, loff_t *pos)
1059 {
1060 	mutex_lock(&slab_mutex);
1061 	return seq_list_start(&slab_caches, *pos);
1062 }
1063 
1064 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1065 {
1066 	return seq_list_next(p, &slab_caches, pos);
1067 }
1068 
1069 void slab_stop(struct seq_file *m, void *p)
1070 {
1071 	mutex_unlock(&slab_mutex);
1072 }
1073 
1074 static void
1075 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1076 {
1077 	struct kmem_cache *c;
1078 	struct slabinfo sinfo;
1079 
1080 	if (!is_root_cache(s))
1081 		return;
1082 
1083 	for_each_memcg_cache(c, s) {
1084 		memset(&sinfo, 0, sizeof(sinfo));
1085 		get_slabinfo(c, &sinfo);
1086 
1087 		info->active_slabs += sinfo.active_slabs;
1088 		info->num_slabs += sinfo.num_slabs;
1089 		info->shared_avail += sinfo.shared_avail;
1090 		info->active_objs += sinfo.active_objs;
1091 		info->num_objs += sinfo.num_objs;
1092 	}
1093 }
1094 
1095 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1096 {
1097 	struct slabinfo sinfo;
1098 
1099 	memset(&sinfo, 0, sizeof(sinfo));
1100 	get_slabinfo(s, &sinfo);
1101 
1102 	memcg_accumulate_slabinfo(s, &sinfo);
1103 
1104 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1105 		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1106 		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
1107 
1108 	seq_printf(m, " : tunables %4u %4u %4u",
1109 		   sinfo.limit, sinfo.batchcount, sinfo.shared);
1110 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
1111 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1112 	slabinfo_show_stats(m, s);
1113 	seq_putc(m, '\n');
1114 }
1115 
1116 static int slab_show(struct seq_file *m, void *p)
1117 {
1118 	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1119 
1120 	if (p == slab_caches.next)
1121 		print_slabinfo_header(m);
1122 	if (is_root_cache(s))
1123 		cache_show(s, m);
1124 	return 0;
1125 }
1126 
1127 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
1128 int memcg_slab_show(struct seq_file *m, void *p)
1129 {
1130 	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1131 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
1132 
1133 	if (p == slab_caches.next)
1134 		print_slabinfo_header(m);
1135 	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
1136 		cache_show(s, m);
1137 	return 0;
1138 }
1139 #endif
1140 
1141 /*
1142  * slabinfo_op - iterator that generates /proc/slabinfo
1143  *
1144  * Output layout:
1145  * cache-name
1146  * num-active-objs
1147  * total-objs
1148  * object size
1149  * num-active-slabs
1150  * total-slabs
1151  * num-pages-per-slab
1152  * + further values on SMP and with statistics enabled
1153  */
1154 static const struct seq_operations slabinfo_op = {
1155 	.start = slab_start,
1156 	.next = slab_next,
1157 	.stop = slab_stop,
1158 	.show = slab_show,
1159 };
1160 
1161 static int slabinfo_open(struct inode *inode, struct file *file)
1162 {
1163 	return seq_open(file, &slabinfo_op);
1164 }
1165 
1166 static const struct file_operations proc_slabinfo_operations = {
1167 	.open		= slabinfo_open,
1168 	.read		= seq_read,
1169 	.write          = slabinfo_write,
1170 	.llseek		= seq_lseek,
1171 	.release	= seq_release,
1172 };
1173 
1174 static int __init slab_proc_init(void)
1175 {
1176 	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
1177 						&proc_slabinfo_operations);
1178 	return 0;
1179 }
1180 module_init(slab_proc_init);
1181 #endif /* CONFIG_SLABINFO */
1182 
1183 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1184 					   gfp_t flags)
1185 {
1186 	void *ret;
1187 	size_t ks = 0;
1188 
1189 	if (p)
1190 		ks = ksize(p);
1191 
1192 	if (ks >= new_size) {
1193 		kasan_krealloc((void *)p, new_size);
1194 		return (void *)p;
1195 	}
1196 
1197 	ret = kmalloc_track_caller(new_size, flags);
1198 	if (ret && p)
1199 		memcpy(ret, p, ks);
1200 
1201 	return ret;
1202 }
1203 
1204 /**
1205  * __krealloc - like krealloc() but don't free @p.
1206  * @p: object to reallocate memory for.
1207  * @new_size: how many bytes of memory are required.
1208  * @flags: the type of memory to allocate.
1209  *
1210  * This function is like krealloc() except it never frees the originally
1211  * allocated buffer. Use this if you don't want to free the buffer
1212  * immediately, as is the case, for example, with RCU-protected data.
1213  */
1214 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
1215 {
1216 	if (unlikely(!new_size))
1217 		return ZERO_SIZE_PTR;
1218 
1219 	return __do_krealloc(p, new_size, flags);
1220 
1221 }
1222 EXPORT_SYMBOL(__krealloc);
1223 
1224 /**
1225  * krealloc - reallocate memory. The contents will remain unchanged.
1226  * @p: object to reallocate memory for.
1227  * @new_size: how many bytes of memory are required.
1228  * @flags: the type of memory to allocate.
1229  *
1230  * The contents of the object pointed to are preserved up to the
1231  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
1232  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
1233  * %NULL pointer, the object pointed to is freed.
1234  */
1235 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1236 {
1237 	void *ret;
1238 
1239 	if (unlikely(!new_size)) {
1240 		kfree(p);
1241 		return ZERO_SIZE_PTR;
1242 	}
1243 
1244 	ret = __do_krealloc(p, new_size, flags);
1245 	if (ret && p != ret)
1246 		kfree(p);
1247 
1248 	return ret;
1249 }
1250 EXPORT_SYMBOL(krealloc);
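/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * growing a buffer while preserving its contents.  On failure the old
 * buffer is left untouched, so it must not be overwritten blindly.
 */
static int example_grow(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is still valid */

	*bufp = tmp;
	return 0;
}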
1251 
1252 /**
1253  * kzfree - like kfree but zero memory
1254  * @p: object to free memory of
1255  *
1256  * The memory of the object @p points to is zeroed before it is freed.
1257  * If @p is %NULL, kzfree() does nothing.
1258  *
1259  * Note: this function zeroes the whole allocated buffer which can be a good
1260  * deal bigger than the requested buffer size passed to kmalloc(). So be
1261  * careful when using this function in performance sensitive code.
1262  */
1263 void kzfree(const void *p)
1264 {
1265 	size_t ks;
1266 	void *mem = (void *)p;
1267 
1268 	if (unlikely(ZERO_OR_NULL_PTR(mem)))
1269 		return;
1270 	ks = ksize(mem);
1271 	memset(mem, 0, ks);
1272 	kfree(mem);
1273 }
1274 EXPORT_SYMBOL(kzfree);
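/*
 * Minimal usage sketch (illustrative): scrubbing a buffer that held key
 * material before it is returned to the allocator.  The names are
 * hypothetical.
 */
static void example_drop_key(void *key_material)
{
	/* Zeroes the whole ksize() of the allocation, then kfree()s it. */
	kzfree(key_material);
}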
1275 
1276 /* Tracepoints definitions. */
1277 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1278 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1279 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1280 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1281 EXPORT_TRACEPOINT_SYMBOL(kfree);
1282 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1283