#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
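
/*
 * Usage sketch (illustrative, assuming the default kmalloc size classes):
 * kmalloc_slab() maps a request size to one of the fixed kmalloc caches,
 * so
 *
 *	struct kmem_cache *s = kmalloc_slab(24, GFP_KERNEL);
 *
 * returns the kmalloc-32 cache, the smallest size class that can hold a
 * 24-byte object.
 */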

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
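
/*
 * Usage sketch (illustrative only): callers that want several objects at
 * once use the public kmem_cache_alloc_bulk()/kmem_cache_free_bulk()
 * wrappers, which fall back to the generic loops above when the allocator
 * has no fast bulk path. kmem_cache_alloc_bulk() returns 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */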

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}
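
/*
 * memcg_uncharge_slab() below is the inverse of memcg_charge_slab(): it
 * backs out the same page-stat delta (with a negated count) and then
 * releases the charge, so the per-memcg slab counters stay balanced over
 * a slab page's lifetime.
 */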
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
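
/*
 * slab_ksize() reports how many bytes of an object a caller may actually
 * use. With SLUB this can exceed object_size when the tail of the slot is
 * not needed for debugging metadata or an out-of-object freelist pointer;
 * this mirrors what ksize() reports for slab-backed allocations.
 */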
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */