/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * Either this struct is used directly by the allocator (SLOB), or the
 * allocator must include definitions for all fields provided in
 * kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        size_t useroffset;      /* Usercopy region offset */
        size_t usersize;        /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
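
/*
 * Illustrative sketch, not a definition in this header: code that runs
 * during early boot typically gates slab use on slab_state. The real
 * slab_is_available() helper in mm/slab_common.c is essentially
 *
 *      bool slab_is_available(void)
 *      {
 *              return slab_state >= UP;
 *      }
 *
 * so early callers can fall back to memblock until the allocator is up.
 */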

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name;
        unsigned long size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        slab_flags_t flags, size_t useroffset,
                        size_t usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, slab_flags_t flags, size_t useroffset,
                        size_t usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
        slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)
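
/*
 * Illustrative sketch (assumption, modeled on mm/slab_common.c): cache
 * creation validates caller-supplied flags against the masks above,
 * roughly
 *
 *      if (flags & ~SLAB_FLAGS_PERMITTED)
 *              return ERR_PTR(-EINVAL);
 *      flags &= CACHE_CREATE_MASK;
 *
 * i.e. flags unknown to any allocator are rejected outright, while flags
 * that are legal but unsupported in the current configuration are
 * silently dropped.
 */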

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head         slab_root_caches;
#define root_caches_node        memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
        list_for_each_entry(iter, &(root)->memcg_params.children, \
                            memcg_params.children_node)
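
/*
 * Illustrative sketch (hypothetical caller): walking the per-memcg
 * children of a root cache with the iterator above; slab_mutex must be
 * held so the children list cannot change underneath us.
 *
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache(c, root_cache)
 *              pr_info("child cache %s\n", c->name);
 *      mutex_unlock(&slab_mutex);
 */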

static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * We append suffixes to the cache name in memcg because caches created
 * in the system can't share the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        struct kmem_cache *cachep;
        struct memcg_cache_array *arr;

        rcu_read_lock();
        arr = rcu_dereference(s->memcg_params.memcg_caches);

        /*
         * Make sure we will access the up-to-date value. The code updating
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
        cachep = READ_ONCE(arr->entries[idx]);
        rcu_read_unlock();

        return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return;
        memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
                                void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches        slab_caches
#define root_caches_node        list

#define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
                                    struct kmem_cache *s)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
                                       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value. But we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, s->name, cachep->name);
        WARN_ON_ONCE(1);
        return s;
}
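
/*
 * Illustrative sketch (assumption, mirrors the allocators' free paths):
 * kmem_cache_free() first redirects to the cache the object actually
 * belongs to, so that freeing through the root cache still finds the
 * right per-memcg child and the consistency check above can fire:
 *
 *      void kmem_cache_free(struct kmem_cache *s, void *x)
 *      {
 *              s = cache_from_obj(s, x);
 *              ...
 *      }
 */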

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we need to store the freelist pointer back there or track
         * user information then we can only use the space before that
         * information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation.
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     gfp_t flags)
{
        flags &= gfp_allowed_mask;

        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        if (should_failslab(s, flags))
                return NULL;

        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_kmem_get_cache(s);

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                        size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                void *object = p[i];

                kmemleak_alloc_recursive(object, s->object_size, 1,
                                         s->flags, flags);
                kasan_slab_alloc(s, object, flags);
        }

        if (memcg_kmem_enabled())
                memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                if ((__n = get_node(__s, __node)))
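
/*
 * Illustrative sketch (hypothetical helper): counting partial slabs
 * across all nodes with the iterator above; nr_partial is the SLUB
 * per-node field declared in kmem_cache_node.
 *
 *      unsigned long count_partial_slabs(struct kmem_cache *s)
 *      {
 *              struct kmem_cache_node *n;
 *              unsigned long nr = 0;
 *              int node;
 *
 *              for_each_kmem_cache_node(s, node, n)
 *                      nr += n->nr_partial;
 *              return nr;
 *      }
 */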

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */