#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
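
/*
 * Illustrative sketch (not part of the original header): a /proc/slabinfo
 * style show path would typically fill a struct slabinfo via get_slabinfo()
 * and print the counters. The helper name below is hypothetical; only
 * get_slabinfo(), cache_name() and seq_printf() are real interfaces.
 *
 *	static void example_show_one(struct seq_file *m, struct kmem_cache *s)
 *	{
 *		struct slabinfo sinfo;
 *
 *		memset(&sinfo, 0, sizeof(sinfo));
 *		get_slabinfo(s, &sinfo);
 *		seq_printf(m, "%-17s %6lu %6lu %u\n", cache_name(s),
 *			   sinfo.active_objs, sinfo.num_objs,
 *			   sinfo.objects_per_slab);
 *	}
 */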

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
		(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Memcg caches get a suffix appended to their name because two caches in
 * the system cannot share the same name. But when we print them locally,
 * it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
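
/*
 * Illustrative sketch (not part of the original header): the object-free
 * paths in the allocators typically remap to the cache the object really
 * came from before releasing it; cache_from_obj() above is the helper
 * used for that. The function name below is hypothetical:
 *
 *	void example_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... hand x back to the freelist of s ...
 *	}
 */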


/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */
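
/*
 * Illustrative sketch (not part of the original header): both allocators
 * walk the per-node lists in struct kmem_cache_node under list_lock. A
 * SLUB-style count of objects sitting on the partial list could look
 * roughly like the hypothetical helper below (partial slabs are chained
 * through page->lru in this version):
 *
 *	static unsigned long example_count_partial(struct kmem_cache_node *n)
 *	{
 *		unsigned long flags, count = 0;
 *		struct page *page;
 *
 *		spin_lock_irqsave(&n->list_lock, flags);
 *		list_for_each_entry(page, &n->partial, lru)
 *			count += page->objects;
 *		spin_unlock_irqrestore(&n->list_lock, flags);
 *		return count;
 *	}
 */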