xref: /openbmc/linux/mm/slab.h (revision 36bccb11)
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
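
/*
 * Illustrative note: core code gates early allocations on slab_state.  For
 * example, slab_is_available() in mm/slab_common.c boils down to a check of
 * the form:
 *
 *	return slab_state >= UP;
 *
 * i.e. kmalloc()/kmem_cache_alloc() can only be relied upon once the
 * allocator has reached at least the UP state.
 */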

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

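/*
 * Work out the effective object alignment from the creation flags
 * (e.g. SLAB_HWCACHE_ALIGN), the caller-requested alignment and the
 * object size.
 */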
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

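/* Helpers for creating the kmalloc array and other caches during early boot */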
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
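
/*
 * Rough sketch of how the alias hook is used (simplified from the
 * kmem_cache_create() path in mm/slab_common.c; error handling omitted and
 * the creation step is a placeholder, not a real function name):
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (!s)
 *		s = <allocate and __kmem_cache_create() a new cache>;
 *
 * SLUB may merge compatible caches and hand back an existing one; the other
 * configurations always return NULL, so every request creates a new cache.
 */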


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

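/*
 * Union of all flags a caller may legitimately pass to kmem_cache_create();
 * the common creation code masks the caller's flags with this value.
 */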
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

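/*
 * Allocator-specific teardown: __kmem_cache_shutdown() releases a cache's
 * slabs, and slab_kmem_cache_release() frees the struct kmem_cache itself
 * once it is no longer in use.
 */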
int __kmem_cache_shutdown(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

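/* Per-cache statistics as reported through /proc/slabinfo */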
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
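
/*
 * For reference, each cache shows up in /proc/slabinfo roughly as:
 *
 *   # name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> \
 *     : tunables <limit> <batchcount> <sharedfactor> \
 *     : slabdata <active_slabs> <num_slabs> <sharedavail>
 *
 * slabinfo_write() parses "name limit batchcount sharedfactor" lines written
 * to /proc/slabinfo to retune a cache (SLAB only; SLUB does not support
 * these tunables).
 */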

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

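/*
 * Page accounting for memcg child caches: slab pages are added to the owning
 * cache's nr_pages count when allocated and subtracted on release; once the
 * count drops to zero the memcg cache itself is destroyed.
 */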
static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
					struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Memcg caches get a suffix appended to their name because no two caches in
 * the system may share the same name. When printing them locally, however,
 * it is clearer to refer to them by the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note that RCU only protects the memcg_caches array, not the per-memcg
 * caches themselves. The caller must therefore ensure the memcg's cache does
 * not go away. Since a memcg's cache, once created, is destroyed only along
 * with its root cache, this is guaranteed whenever we are going to allocate
 * from the cache or hold a reference to the root cache by other means.
 * Otherwise, hold either the slab_mutex or the memcg's slab_caches_mutex
 * while calling this function and accessing the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
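
/*
 * Illustrative (simplified) use, following the locking comment above: with
 * slab_mutex held, a caller can walk a root cache's per-memcg children.  The
 * loop bound and the per-cache operation are placeholders, not real kernel
 * symbols:
 *
 *	mutex_lock(&slab_mutex);
 *	for (i = 0; i < <number of memcg cache slots>; i++) {
 *		c = cache_from_memcg_idx(root, i);
 *		if (c)
 *			<operate on the child cache c>;
 *	}
 *	mutex_unlock(&slab_mutex);
 */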

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif

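/*
 * Resolve the cache an object was actually allocated from.  The free paths
 * use this both to catch objects freed to the wrong cache and to redirect a
 * free to the per-memcg child cache that really owns the object.
 */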
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments below would yield
	 * the same value, but we do not want to pay their cost in that case.
	 * If kmemcg is not compiled in, the compiler should be smart enough
	 * to elide the assignments entirely; slab_equal_or_root then also
	 * becomes a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
#endif


/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
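	/* SLUB: per-node count and list of partially filled slabs */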
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;		/* total slabs on this node */
	atomic_long_t total_objects;	/* total objects in those slabs */
	struct list_head full;		/* fully allocated slabs, tracked when debugging */
#endif
#endif

};

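/* seq_file iterator callbacks used by the /proc/slabinfo implementation */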
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
293