#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate definitions of these fields in the kmem_cache structures
 * of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added-on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
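
/*
 * Illustration only (hypothetical sketch, not current kernel code): with
 * C11 anonymous struct members, the common fields above could be embedded
 * unnamed in each allocator's kmem_cache, e.g.:
 *
 *	struct kmem_cache {
 *		struct {
 *			unsigned int object_size;
 *			unsigned int size;
 *			...
 *		};
 *		...allocator-specific fields...
 *	};
 *
 * In practice the shared field list would come from a single definition
 * (e.g. a macro), so the allocators would stop duplicating it.
 */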

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
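
/*
 * Example (hypothetical sketch): bootstrap code gates optional work on
 * slab_state, e.g. deferring sysfs registration until the allocator is
 * fully up. In SLUB the pattern is roughly:
 *
 *	if (slab_state < FULL)
 *		return 0;
 *	return sysfs_slab_add(s);
 *
 * (When not yet FULL, registration is deferred and done later; the exact
 * helper called is allocator-specific.)
 */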

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
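
/*
 * Example (sketch of the common kmalloc path, assuming the usual helpers):
 * generic kmalloc code resolves a size to one of the fixed kmalloc caches
 * before delegating to the allocator proper, roughly:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc_trace(s, flags, size);
 */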

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
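
/*
 * Example (hypothetical sketch): kmem_cache_create() tries to reuse an
 * existing compatible cache before creating a new one, conceptually:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;
 *
 * __kmem_cache_alias() relies on find_mergeable(), and caches for which
 * slab_unmergeable() is true are never merged.
 */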

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
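
/*
 * Example (hypothetical sketch): a creation path can use the mask to
 * reject caller-supplied flags outside the legal set:
 *
 *	if (flags & ~CACHE_CREATE_MASK)
 *		return -EINVAL;
 */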

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
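
/*
 * Example (hypothetical sketch): a /proc/slabinfo style consumer fills in
 * a struct slabinfo and derives utilization from it:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *
 * active_objs / num_objs then gives the fraction of objects in use.
 */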

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
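
/*
 * Example (sketch, close to the generic fallback): bulk allocation can be
 * implemented as a loop over the single-object path, undoing partial work
 * on failure:
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *		p[i] = x;
 *	}
 *	return i;
 */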

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
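
/*
 * Example (hypothetical sketch): walking every per-memcg child of a root
 * cache, with slab_mutex held as required:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, s)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */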

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to cache names in memcg because two caches in the
 * system cannot share a name. But when we print them locally, it is
 * better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg
 * caches. That said, the caller must ensure the memcg's cache won't go
 * away by either taking a css reference to the owner cgroup or holding
 * the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = __memcg_kmem_charge_memcg(page, gfp, order,
					s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}
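
/*
 * Example (hypothetical sketch): an allocator charges a freshly allocated
 * slab page and backs out on failure; the matching uncharge happens when
 * the slab is freed:
 *
 *	page = alloc_pages_node(node, flags, order);
 *	if (page && memcg_charge_slab(page, flags, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */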

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. But we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
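
/*
 * Example (sketch, assuming SLUB's shape of kmem_cache_free()): free paths
 * use cache_from_obj() to translate the cache the caller passed in into
 * the cache the object actually belongs to:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		if (!s)
 *			return;
 *		...free x to s...
 *	}
 */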

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}
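
/*
 * Example (hypothetical sketch): slab_ksize() is why ksize() reports the
 * usable size rather than the requested one:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	size_t n = ksize(p);
 *	kfree(p);
 *
 * Here n would likely be 128 on a typical SLUB configuration without
 * debugging, since the kmalloc-128 cache backs the allocation.
 */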

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	return memcg_kmem_get_cache(s, flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}
	memcg_kmem_put_cache(s);
}
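
/*
 * Example (sketch of how allocators use the hooks): the fast path brackets
 * the actual allocation with the pre/post hooks:
 *
 *	s = slab_pre_alloc_hook(s, flags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocator-specific fast or slow path...;
 *	slab_post_alloc_hook(s, flags, 1, &object);
 *	return object;
 */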

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
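
/*
 * Example (hypothetical sketch, SLUB fields shown): summing partial slabs
 * across all nodes of a cache:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */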

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#endif /* MM_SLAB_H */