#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size; /* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
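
/*
 * Illustrative sketch only (the statement below is not part of this header):
 * a cache-creation path would typically drop flag bits that fall outside
 * CACHE_CREATE_MASK before handing the request to the allocator, e.g.
 *
 *	flags &= CACHE_CREATE_MASK;
 *
 * so that debug or cache-management flags unsupported by the current
 * configuration are not propagated into the allocator proper.
 */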

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
				 memcg_params.list)
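
/*
 * Usage sketch (hypothetical caller, for illustration only; "root_cache" and
 * "do_something" are placeholders, not symbols defined in this file): with
 * slab_mutex held, the per-memcg children of a root cache can be walked as
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		do_something(c);
 *	mutex_unlock(&slab_mutex);
 *
 * The _safe variant additionally takes a temporary cursor so the entry
 * currently visited may be removed from the list inside the loop body.
 */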

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
	for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
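
/*
 * Usage sketch (hypothetical, for illustration only; "count" is a placeholder
 * local variable): walk the per-node structures of a cache, e.g. to sum up
 * SLUB's partial-slab counts:
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		count += n->nr_partial;
 *
 * Nodes without an allocated kmem_cache_node are simply skipped by the
 * iterator.
 */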

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */