#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate declarations of these fields in the kmem_cache structures
 * of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size; /* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
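
/*
 * Illustrative sketch (not code from this file): early callers can check how
 * far bootstrap has progressed before touching slab internals; for example,
 * the slab_is_available() helper in mm/slab_common.c boils down to a
 * comparison like
 *
 *	if (slab_state >= UP)
 *		... kmem_cache_alloc() and friends may be used ...
 */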

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];
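
/*
 * Illustrative sketch (the real table lives in mm/slab_common.c): each entry
 * pairs a kmalloc cache name with its object size, roughly
 *
 *	{ "kmalloc-96", 96 }, { "kmalloc-192", 192 }, { "kmalloc-8", 8 }, ...
 *
 * so that index i of kmalloc_info describes the cache at index i of the
 * kmalloc cache array.
 */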

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
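
/*
 * Illustrative sketch (hypothetical caller): a generic kmalloc()
 * implementation maps the requested size onto one of the fixed-size caches
 * and then allocates from it, roughly
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 */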


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
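
/*
 * Illustrative sketch (hypothetical names): with slab_mutex held, walking the
 * per-memcg children of a root cache looks roughly like
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */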

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to the cache name in memcg because caches created
 * in the system cannot share the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to even do the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we need to store the freelist pointer back there
	 * or track user information, then we can only use the
	 * space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Otherwise we can use all the padding, etc., for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
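
/*
 * Illustrative sketch (hypothetical allocator fast path, not code from this
 * file): SLAB and SLUB bracket their allocation paths with the two hooks
 * above, roughly
 *
 *	void *obj;
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	obj = ...allocate an object from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &obj);
 *	return obj;
 */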

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
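
/*
 * Illustrative sketch (hypothetical names, assuming SLUB's nr_partial field):
 * summing a per-node counter over every node that has a kmem_cache_node looks
 * roughly like
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */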

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
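
/*
 * Illustrative sketch (hypothetical call sites, 'count' standing for the
 * number of objects per slab): an allocator supporting freelist randomization
 * creates the precomputed sequence while setting the cache up and destroys it
 * on release, roughly
 *
 *	if (cache_random_seq_create(cachep, count, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	cache_random_seq_destroy(cachep);
 */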

#endif /* MM_SLAB_H */