/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB),
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate definitions in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

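/*
 * Illustrative sketch (not kernel code): early-boot callers can gate
 * on slab_state before trusting kmalloc(); memblock is the usual
 * fallback while the allocator is still coming up.
 *
 *	if (slab_state >= UP)
 *		buf = kmalloc(size, GFP_NOWAIT);
 *	else
 *		buf = memblock_alloc(size, SMP_CACHE_BYTES);
 */
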
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
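/*
 * Example (a sketch, not part of the API contract): a request is
 * mapped to the next size class up, so 100 bytes lands in the
 * 128-byte kmalloc cache:
 *
 *	struct kmem_cache *c = kmalloc_slab(100, GFP_KERNEL);
 */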
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
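
/*
 * Sketch of how these masks are meant to be used (illustrative, after
 * the checks in slab_common.c): reject flags outside the permitted
 * set, then drop anything the current configuration cannot honour.
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;
 *	flags &= CACHE_CREATE_MASK;
 */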

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

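/*
 * Example (a sketch only): an allocator lacking a fast bulk path can
 * fall back on the generic loops; __kmem_cache_alloc_bulk() returns
 * the number of objects allocated, or 0 on failure.
 *
 *	void *objs[16];
 *
 *	if (!__kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	__kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */
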
#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

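/*
 * Illustrative use (a sketch): walk a root cache's per-memcg children
 * under slab_mutex, as the comment above requires.
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */
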
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Caches created for a memcg get a suffix appended to the name, since
 * two caches in the system cannot share a name. When printing them
 * locally, however, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}
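
/*
 * Illustrative caller pattern (a sketch): holding slab_mutex is one
 * way to keep the returned cache alive across the lookup, per the
 * comment above.
 *
 *	mutex_lock(&slab_mutex);
 *	c = cache_from_memcg_idx(root, idx);
 *	if (c)
 *		... use c ...
 *	mutex_unlock(&slab_mutex);
 */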

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	memcg_kmem_uncharge(page, order);
}

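/*
 * Sketch of the intended call pattern (illustrative; mirrors what the
 * allocators do when allocating a new slab page): charge right after
 * the page allocation and back out on failure.
 *
 *	page = alloc_pages_node(node, gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */
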
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    likely(!(s->flags & SLAB_CONSISTENCY_CHECKS)))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}

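/*
 * Example (a sketch): slab_ksize() is what ultimately backs ksize().
 * With SLUB and no debug flags, an object from the 16-byte kmalloc
 * cache reports 16 usable bytes even if fewer were requested:
 *
 *	void *p = kmalloc(13, GFP_KERNEL);
 *	size_t usable = ksize(p);	(reports 16, not 13)
 */
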
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

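/*
 * How the hooks pair up inside an allocator's fast path (a sketch,
 * not a verbatim excerpt; "object" stands in for the allocator's own
 * slot handling):
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ... allocate from s ...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */
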
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
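
/*
 * Example (illustrative sketch): summing SLUB's per-node partial-list
 * lengths; the "node" and "n" locals follow the macro's expectations.
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */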

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */