#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
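
/*
 * Illustrative sketch (not part of the upstream header): the common cache
 * creation path is expected to filter caller-supplied flags against
 * CACHE_CREATE_MASK, so that only the flags valid for the compiled-in
 * allocator and debug configuration survive, roughly along these lines:
 *
 *	struct kmem_cache *
 *	kmem_cache_create(const char *name, size_t size, size_t align,
 *			  unsigned long flags, void (*ctor)(void *))
 *	{
 *		...
 *		flags &= CACHE_CREATE_MASK;
 *		...
 *	}
 *
 * The exact call chain lives in mm/slab_common.c and may differ between
 * kernel versions; the masking step is the point being illustrated.
 */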

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the cache name in memcg because we cannot have caches
 * created in the system with the same name. But when we print them locally,
 * it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away. Since once
 * created a memcg's cache is destroyed only along with the root cache, it is
 * true if we are going to allocate from the cache or hold a reference to the
 * root cache by other means. Otherwise, we should hold either the slab_mutex
 * or the memcg's slab_caches_mutex while calling this function and accessing
 * the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_charge_slab(s, gfp, order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	__memcg_uncharge_slab(s, order);
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif
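
/*
 * Illustrative sketch (not part of the upstream header): the charge/uncharge
 * helpers above are meant to bracket slab page allocation for memcg child
 * caches. A SLUB-style page allocation path might pair them roughly like
 * this (the function name here is illustrative):
 *
 *	static struct page *alloc_slab_page(struct kmem_cache *s, gfp_t flags,
 *					    int order)
 *	{
 *		struct page *page;
 *
 *		if (memcg_charge_slab(s, flags, order))
 *			return NULL;
 *		page = alloc_pages(flags, order);
 *		if (!page)
 *			memcg_uncharge_slab(s, order);
 *		return page;
 *	}
 *
 * The matching memcg_uncharge_slab() for a successful allocation is expected
 * when the slab page is eventually freed.
 */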

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
#endif

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
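
/*
 * Illustrative sketch (not part of the upstream header): cache_from_obj()
 * is intended for the allocators' free paths, so that an object handed back
 * against a memcg clone (or, with SLAB_DEBUG_FREE, against the wrong cache)
 * is still returned to the cache it was actually allocated from. Roughly:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... (free x back into s) ...
 *	}
 */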